repo_name (string, lengths 7–90) | path (string, lengths 4–191) | copies (string, lengths 1–3) | size (string, lengths 4–6) | content (string, lengths 762–838k) | license (string, 15 classes)
---|---|---|---|---|---|
tridesclous/tridesclous | tridesclous/export.py | 1 | 4852 | import os
from collections import OrderedDict
import numpy as np
import scipy.io
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class GenericSpikeExporter:
def __call__(self,spikes, catalogue, seg_num, chan_grp, export_path,
split_by_cluster=False,
use_cell_label=True,
#~ use_index=True,
):
if not os.path.exists(export_path):
os.makedirs(export_path)
#~ print('export', spikes.size, seg_num, export_path)
#~ print('split_by_cluster', split_by_cluster, 'use_cell_label', use_cell_label)
clusters = catalogue['clusters']
spike_labels = spikes['cluster_label']
if use_cell_label:
spike_labels = spikes['cluster_label'].copy()
for l in clusters:
mask = spike_labels==l['cluster_label']
spike_labels[mask] = l['cell_label']
spike_indexes = spikes['index']
out_data = OrderedDict()
if split_by_cluster:
if use_cell_label:
possible_labels = np.unique(clusters['cell_label'])
label_name = 'cell'
else:
possible_labels = clusters['cluster_label']
label_name = 'cluster'
for k in possible_labels:
keep = k == spike_labels
out_data[label_name + '#'+ str(k)] = (spike_indexes[keep], spike_labels[keep])
else:
out_data['cell#all'] = (spike_indexes, spike_labels)
name = 'spikes - segNum {} - chanGrp {}'.format(seg_num, chan_grp)
filename = os.path.join(export_path, name)
self.write_out_data(out_data, filename)
class CsvSpikeExporter(GenericSpikeExporter):
ext = 'csv'
def write_out_data(self, out_data, filename):
for key, (spike_indexes, spike_labels) in out_data.items():
filename2 = filename +' - '+key+'.csv'
self._write_one_file(filename2, spike_indexes, spike_labels)
def _write_one_file(self, filename, indexes, labels):
# each row is written as "spike_index,label"
rows = [''] * len(indexes)
for i in range(len(indexes)):
rows[i] = '{},{}\n'.format(indexes[i], labels[i])
with open(filename, 'w') as out:
out.writelines(rows)
export_csv = CsvSpikeExporter()
class MatlabSpikeExporter(GenericSpikeExporter):
ext = 'mat'
def write_out_data(self, out_data, filename):
mdict = {}
for key, (spike_indexes, spike_labels) in out_data.items():
mdict['index_'+key] = spike_indexes
mdict['label_'+key] =spike_labels
scipy.io.savemat(filename+'.mat', mdict)
export_matlab = MatlabSpikeExporter()
class ExcelSpikeExporter(GenericSpikeExporter):
ext = 'xlsx'
def write_out_data(self, out_data, filename):
assert HAS_PANDAS
writer = pd.ExcelWriter(filename+'.xlsx')
for key, (spike_indexes, spike_labels) in out_data.items():
df = pd.DataFrame()
df['index'] = spike_indexes
df['label'] = spike_labels
df.to_excel(writer, sheet_name=key, index=False)
writer.save()
export_excel = ExcelSpikeExporter()
# list
export_list = [export_csv, export_matlab, ]
if HAS_PANDAS:
export_list.append(export_excel)
export_dict = {e.ext:e for e in export_list}
def export_catalogue_spikes(cc, export_path=None, formats=None):
"""
This exports spikes from the catalogue.
Useful when the catalogue peak_sampler mode is 'all'.
This avoids running the peeler.
"""
dataio = cc.dataio
chan_grp = cc.chan_grp
sampler_mode = cc.info['peak_sampler']['mode']
if sampler_mode != 'all':
print('You are trying to export peaks from the catalogue but peak_sampler mode is not "all"')
if export_path is None:
export_path = os.path.join(dataio.dirname, 'export_catalogue_chan_grp_{}'.format(chan_grp))
catalogue = {}
catalogue['clusters'] = cc.clusters.copy()
if formats is None:
exporters = export_list
elif isinstance(formats, str):
assert formats in export_dict
exporters = [ export_dict[formats] ]
elif isinstance(formats, list):
exporters = [export_dict[f] for f in formats]
else:
raise ValueError()
for seg_num in range(dataio.nb_segment):
in_segment = (cc.all_peaks['segment'] == seg_num)
pos_label = (cc.all_peaks['cluster_label'] >= 0)
spikes = cc.all_peaks[in_segment & pos_label]
if spikes is None: continue
args = (spikes, catalogue, seg_num, chan_grp, export_path,)
kargs = dict(split_by_cluster=False, use_cell_label=False)
for exporter in exporters:
exporter(*args, **kargs)
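# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It drives the CSV
# exporter above with a small synthetic structured array; the field names
# ('index', 'cluster_label', 'cell_label') follow the arrays used in this
# file, while the sizes, labels and output path are made up for illustration.
def _example_export_synthetic_spikes(export_path='./example_export'):
    spikes = np.zeros(4, dtype=[('index', 'int64'), ('cluster_label', 'int64')])
    spikes['index'] = [10, 55, 120, 300]      # sample index of each spike
    spikes['cluster_label'] = [0, 1, 0, 1]    # cluster assignment of each spike
    clusters = np.zeros(2, dtype=[('cluster_label', 'int64'), ('cell_label', 'int64')])
    clusters['cluster_label'] = [0, 1]
    clusters['cell_label'] = [0, 1]
    catalogue = {'clusters': clusters}
    # One CSV file per cell label, each row being "spike_index,label"
    export_csv(spikes, catalogue, 0, 0, export_path, split_by_cluster=True)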
| mit |
lefthandedroo/Cosmo-models | zprev versions/Models_py_backup/stats copy.py | 1 | 12889 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 16:02:10 2018
@author: BallBlueMeercat
"""
import matplotlib.pyplot as plt
from emcee import EnsembleSampler
import numpy as np
import time
import os.path
import datasim
import tools
import ln
import plots
def stats(test_params, data_dict, sigma, nsteps,
save_path, firstderivs_key):
"""
Takes in:
test_params = dictionary of parameters to be emcee fitted
'm':int/float = e_m(t)/ec(t0) at t=t0;
'gamma':int/float = interaction term;
'zeta':int/float = interaction term;
'alpha':int/float = SN peak mag correlation parameter;
'beta' :int/float = SN peak mag correlation parameter;
data_dict = dictionary of parameters from data
'colour': numpy.ndarray = SN colour;
'x1': numpy.ndarray = SN stretch correction;
'zpicks': list of redshifts sorted in ascending order;
'mag':list of apparent magnitudes;
sigma = standard deviation of error on the data;
nsteps = int, steps to be taken by each emcee walker;
save_path = string, directory for saving output;
firstderivs_key = string, name of IVCDM model to use for model mag.
Returns:
"""
# print('-stats has been called')
zpicks = data_dict.get('zpicks',0)
mag = data_dict.get('mag',0)
if firstderivs_key == 'exotic':
pass
elif firstderivs_key == 'LCDM':
test_params['gamma'] = 0
del test_params['gamma']
test_params['zeta'] = 0
del test_params['zeta']
else:
test_params['zeta'] = 0
del test_params['zeta']
# emcee parameters:
ndim = len(test_params)
nwalkers = int(ndim * 2)
# Initializing walkers.
poslist = list(test_params.values())
pos = []
for i in poslist:
pos.append(i)
startpos = np.array(pos)
pos = [startpos + 0.001*np.random.randn(ndim) for i in range(nwalkers)]
# Are walkers starting outside of prior?
i = 0
while i < nwalkers:
theta = pos[i]
lp = ln.lnprior(theta, firstderivs_key)
if not np.isfinite(lp):
print('~~~~~~~pos[%s] (outside of prior) = %s ~~~~~~~'%(i, theta))
i += 1
# Sampler setup.
times0 = time.time() # starting sampler timer
sampler = EnsembleSampler(nwalkers, ndim, ln.lnprob,
args=(data_dict, sigma, firstderivs_key, ndim))
# Burnin.
burnin = int(nsteps/4) # steps to discard
print('_____ burnin start')
timeb0 = time.time() # starting burnin timer
pos, prob, state = sampler.run_mcmc(pos, burnin)
timeb1=time.time() # stopping burnin timer
print('_____ burnin end')
sampler.reset()
# Starting sampler after burnin.
print('_____ sampler start')
sampler.run_mcmc(pos, nsteps)
print('_____ sampler end')
times1=time.time() # stopping sampler timer
# Walker steps.
lnprob = sampler.flatlnprobability
# Index of best parameters found by emcee.
bi = np.argmax(sampler.flatlnprobability) # index with highest post prob
trace = sampler.chain[:, burnin:, :].reshape(-1, ndim)
# Extracting results:
thetabest = np.zeros(ndim)
parambest = {}
true = []
propert = {}
propert['trace'] = trace
colours = ['coral', 'orchid', 'apple', 'orange', 'aquamarine', 'black']
def stat(i, sampler, string, test_params, propert):
best_output = sampler.flatchain[bi,i]
# Input m = e_m(z)/ec(z=0).
param_true = test_params.get(string, 0)
true.append(param_true)
# Output m.
output = sampler.flatchain[:,i]
# Standard deviation and mean of the m distribution.
propert[string+'_sd'] = np.std(output)
propert[string+'_mean'] = np.mean(output)
propert[string] = sampler.flatchain[bi,i]
return best_output, output, param_true, propert
for i in range(ndim):
if i == 0:
# mbest = sampler.flatchain[bi,i]
# thetabest[i] = mbest
# parambest['m'] = mbest
# # Input m = e_m(z)/ec(z=0).
# m_true = test_params.get('m', 0)
# true.append(m_true)
# # Output m.
# m = sampler.flatchain[:,i]
# # Standard deviation and mean of the m distribution.
# m_sd = np.std(m)
# m_mean = np.mean(m)
# propert['m_sd'] = m_sd
# propert['m_mean'] = m_mean
# propert['m'] = mbest
# plots.stat('coral', m, m_true, 'Matter', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'm', test_params, propert)
plots.stat(colours[i], output, param_true, 'matter', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['m'] = best
elif i == 1:
# Mbest = sampler.flatchain[bi,i]
# thetabest[i] = Mbest
# parambest['M'] = Mbest
# # Input M.
# M_true = test_params.get('M',0)
# true.append(M_true)
# # Output alpha.
# M = sampler.flatchain[:,i]
# # Standard deviation and mean of the alpha distribution
# M_sd = np.std(M)
# M_mean = np.mean(M)
# propert['M_sd'] = M_sd
# propert['M_mean'] = M_mean
# propert['M'] = Mbest
# plots.stat('orchid', M, M_true, 'Mcorr', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'M', test_params, propert)
plots.stat(colours[i], output, param_true, 'Mcorr', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['M'] = best
elif i == 2:
# alphabest = sampler.flatchain[bi,i]
# thetabest[i] = alphabest
# parambest['alpha'] = alphabest
# # Input interaction term.
# a_true = test_params.get('alpha',0)
# true.append(a_true)
# # Output gamma.
# alpha = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# alpha_sd = np.std(alpha)
# alpha_mean = np.mean(alpha)
# propert['alpha_sd'] = alpha_sd
# propert['alpha_mean'] = alpha_mean
# propert['alpha'] = alphabest
# plots.stat('apple', alpha, a_true, 'alpha', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'alpha', test_params, propert)
plots.stat(colours[i], output, param_true, 'alpha', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['alpha'] = best
elif i == 3:
# betabest = sampler.flatchain[bi,i]
# thetabest[i] = betabest
# parambest['beta'] = betabest
# # Input interaction term.
# b_true = test_params.get('beta',0)
# true.append(b_true)
# # Output gamma.
# beta = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# beta_sd = np.std(beta)
# beta_mean = np.mean(beta)
# propert['beta_sd'] = beta_sd
# propert['beta_mean'] = beta_mean
# propert['beta'] = betabest
# plots.stat('orange', beta, b_true, 'beta', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'beta', test_params, propert)
plots.stat(colours[i], output, param_true, 'beta', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['beta'] = best
elif i == 4:
# gammabest = sampler.flatchain[bi,i]
# thetabest[i] = gammabest
# parambest['gamma'] = gammabest
# # Input interaction term.
# g_true = test_params.get('gamma',0)
# true.append(g_true)
# # Output gamma.
# gamma = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# gamma_sd = np.std(gamma)
# gamma_mean = np.mean(gamma)
# propert['gamma_sd'] = gamma_sd
# propert['gamma_mean'] = gamma_mean
# propert['gamma'] = gammabest
# plots.stat('aquamarine', gamma, g_true, 'Gamma', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'gamma', test_params, propert)
plots.stat(colours[i], output, param_true, 'gamma', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['gamma'] = best
elif i == 5:
# zetabest = sampler.flatchain[bi,i]
# thetabest[i] = zetabest
# parambest['zeta'] = zetabest
# # Input interaction term.
# z_true = test_params.get('zeta',0)
# true.append(z_true)
# # Output zeta.
# zeta = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# zeta_sd = np.std(zeta)
# zeta_mean = np.mean(zeta)
# propert['zeta_sd'] = zeta_sd
# propert['zeta_mean'] = zeta_mean
# propert['zeta'] = zetabest
# plots.stat('black', zeta, z_true, 'Zeta', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'zeta', test_params, propert)
plots.stat(colours[i], output, param_true, 'zeta', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['zeta'] = best
# Checking if best found parameters are within prior.
lp = ln.lnprior(thetabest, firstderivs_key)
if not np.isfinite(lp):
print('')
print('best emcee parameters outside of prior (magbest calculation)')
print('')
# Plot of data mag and redshifts, overlayed with
# mag simulated using emcee best parameters and data redshifts.
magbest = datasim.magn(parambest, data_dict, firstderivs_key)
plt.figure()
plt.title('model: '+firstderivs_key
+'\n Evolution of magnitude with redshift \n nsteps: '
+str(nsteps)+', noise: '+str(sigma)+', npoints: '+str(len(zpicks)))
data = plt.errorbar(zpicks, mag, yerr=sigma, fmt='.', alpha=0.3)
best_fit = plt.scatter(zpicks, magbest, lw='1', c='xkcd:tomato')
plt.ylabel('magnitude')
plt.xlabel('z')
plt.legend([data, best_fit], ['LCDM', firstderivs_key])
stamp = str(int(time.time()))
filename = str(stamp)+'____magz__nsteps_'+str(nsteps)+'_nwalkers_' \
+str(nwalkers)+'_noise_'+str(sigma)+'_numpoints_'+str(len(zpicks))+'.png'
filename = os.path.join(save_path, filename)
plt.savefig(filename)
plt.show(block=False)
# Corner plot (walkers' walk + histogram).
import corner
# samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples = sampler.chain[:, :, :].reshape((-1, ndim))
corner.corner(samples, labels=["$m$", "$M$", r"$\alpha$", r"$\beta$", r"$\gamma$", r"$\zeta$"],
truths=true)
# Results getting printed:
if bi == 0:
print('@@@@@@@@@@@@@@@@@')
print('best index =',str(bi))
print('@@@@@@@@@@@@@@@@@')
print('best parameters =',str(parambest))
print('m.a.f.:', np.mean(sampler.acceptance_fraction))
print('nsteps:', str(nsteps))
print('sigma:', str(sigma))
print('npoints:', str(len(zpicks)))
print('model:', firstderivs_key)
tools.timer('burnin', timeb0, timeb1)
tools.timer('sampler', times0, times1)
return propert, sampler
| mit |
dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/CovarianceFunctions.py | 5 | 28645 | ################################################################################
# Copyright (C) 2011-2012 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import itertools
import numpy as np
#import scipy as sp
import scipy.sparse as sp # prefer CSC format
#import scipy.linalg.decomp_cholesky as decomp
#import scipy.linalg as linalg
#import scipy.special as special
#import matplotlib.pyplot as plt
#import time
#import profile
import scipy.spatial.distance as dist
#import scikits.sparse.distance as spdist
from . import node as ef
from bayespy.utils import misc as utils
# Covariance matrices can be either arrays or matrices so be careful
# with products and powers! Use explicit multiply or dot instead of
# *-operator.
def gp_cov_se(D2, overwrite=False):
if overwrite:
K = D2
K *= -0.5
np.exp(K, out=K)
else:
K = np.exp(-0.5*D2)
return K
def gp_cov_pp2_new(r, d, derivative=False):
# Dimension dependent parameter
q = 2
j = np.floor(d/2) + q + 1
# Polynomial coefficients
a2 = j**2 + 4*j + 3
a1 = 3*j + 6
a0 = 3
# Two parts of the covariance function
k1 = (1-r) ** (j+2)
k2 = (a2*r**2 + a1*r + 3)
# TODO: Check that derivative is 0, 1 or 2!
if derivative == 0:
# Return covariance
return k1 * k2 / 3
dk1 = - (j+2) * (1-r)**(j+1)
dk2 = 2*a2*r + a1
if derivative == 1:
# Return first derivative of the covariance
return (k1 * dk2 + dk1 * k2) / 3
ddk1 = (j+2) * (j+1) * (1-r)**j
ddk2 = 2*a2
if derivative == 2:
# Return second derivative of the covariance
return (ddk1*k2 + 2*dk1*dk2 + k1*ddk2) / 3
def gp_cov_pp2(r, d, gradient=False):
# Dimension dependent parameter
j = np.floor(d/2) + 2 + 1
# Polynomial coefficients
a2 = j**2 + 4*j + 3
a1 = 3*j + 6
a0 = 3
# Two parts of the covariance function
k1 = (1-r) ** (j+2)
k2 = (a2*r**2 + a1*r + 3)
# The covariance function
k = k1 * k2 / 3
if gradient:
# The gradient w.r.t. r
dk = k * (j+2) / (r-1) + k1 * (2*a2*r + a1) / 3
return (k, dk)
else:
return k
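# Small numeric check (not from the original file): evaluate the piecewise
# polynomial covariance above on a few scaled distances r in [0, 1) for
# one-dimensional inputs (d=1). The value is 1.0 at r=0 and decays towards 0.
def _example_pp2_values():
    r = np.array([0.0, 0.25, 0.5, 0.75])
    k = gp_cov_pp2(r, 1)                         # covariance values
    (k2, dk) = gp_cov_pp2(r, 1, gradient=True)   # values and gradient w.r.t. r
    return k, dk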
def gp_cov_delta(N):
# TODO: Use sparse matrices here!
if N > 0:
#print('in gpcovdelta', N, sp.identity(N).shape)
return sp.identity(N)
else:
# Sparse matrices do not allow zero-length dimensions
return np.identity(N)
#return np.identity(N)
#return np.asmatrix(np.identity(N))
def squared_distance(x1, x2):
## # Reshape arrays to 2-D arrays
## sh1 = np.shape(x1)[:-1]
## sh2 = np.shape(x2)[:-1]
## d = np.shape(x1)[-1]
## x1 = np.reshape(x1, (-1,d))
## x2 = np.reshape(x2, (-1,d))
(m1,n1) = x1.shape
(m2,n2) = x2.shape
if m1 == 0 or m2 == 0:
D2 = np.empty((m1,m2))
else:
# Compute squared Euclidean distance
D2 = dist.cdist(x1, x2, metric='sqeuclidean')
#D2 = np.asmatrix(D2)
# Reshape the result
#D2 = np.reshape(D2, sh1 + sh2)
return D2
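# Minimal sketch (not in the original file): build a squared exponential
# covariance matrix for two small 1-D input sets using the helpers above.
def _example_se_matrix():
    x1 = np.array([[0.0], [1.0], [2.0]])   # three 1-D inputs
    x2 = np.array([[0.0], [2.0]])          # two 1-D inputs
    D2 = squared_distance(x1, x2)          # 3x2 matrix of squared distances
    K = gp_cov_se(D2)                      # exp(-0.5 * D2)
    return K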
# General rule for the parameters for covariance functions:
#
# (value, [ [dvalue1, ...], [dvalue2, ...], [dvalue3, ...], ...])
#
# For instance,
#
# k = covfunc_se((1.0, []), (15, [ [1,update_grad] ]))
# K = k((x1, [ [dx1,update_grad] ]), (x2, []))
#
# Plain values are converted as:
# value -> (value, [])
def gp_standardize_input(x):
if np.size(x) == 0:
x = np.reshape(x, (0,0))
elif np.ndim(x) == 0:
x = np.reshape(x, (1,1))
elif np.ndim(x) == 1:
x = np.reshape(x, (-1,1))
elif np.ndim(x) == 2:
x = np.atleast_2d(x)
else:
raise Exception("Standard GP inputs must be 2-dimensional")
return x
def gp_preprocess_inputs(x1,x2=None):
#args = list(args)
#if len(args) < 1 or len(args) > 2:
#raise Exception("Number of inputs must be one or two")
if x2 is None:
x1 = gp_standardize_input(x1)
return x1
else:
if x1 is x2:
x1 = gp_standardize_input(x1)
x2 = x1
else:
x1 = gp_standardize_input(x1)
x2 = gp_standardize_input(x2)
return (x1, x2)
#return args
## def gp_preprocess_inputs(x1,x2=None):
## #args = list(args)
## #if len(args) < 1 or len(args) > 2:
## #raise Exception("Number of inputs must be one or two")
## if x2 is not None: len(args) == 2:
## if args[0] is args[1]:
## args[0] = gp_standardize_input(args[0])
## args[1] = args[0]
## else:
## args[1] = gp_standardize_input(args[1])
## args[0] = gp_standardize_input(args[0])
## else:
## args[0] = gp_standardize_input(args[0])
## return args
# TODO:
# General syntax for these covariance functions:
# covfunc(hyper1,
# hyper2,
# ...
# hyperN,
# x1,
# x2=None,
# gradient=list_of_booleans_for_each_hyperparameter)
def covfunc_zeros(x1, x2=None, gradient=False):
# Compute distance and covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
# Only variance vector asked
N = np.shape(x1)[0]
# TODO: Use sparse matrices!
K = np.zeros(N)
#K = np.asmatrix(np.zeros((N,1)))
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
# Full covariance matrix asked
#x1 = inputs[0]
#x2 = inputs[1]
# Number of inputs x1
N1 = np.shape(x1)[0]
N2 = np.shape(x2)[0]
# TODO: Use sparse matrices!
K = np.zeros((N1,N2))
#K = np.asmatrix(np.zeros((N1,N2)))
if gradient is not False:
return (K, [])
else:
return K
def covfunc_delta(amplitude, x1, x2=None, gradient=False):
# Make sure that amplitude is a scalar, not an array object
amplitude = utils.array_to_scalar(amplitude)
## if gradient:
## gradient_amplitude = gradient[0]
## else:
## gradient_amplitude = []
## inputs = gp_preprocess_inputs(*inputs)
# Compute distance and covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
# Only variance vector asked
#x = inputs[0]
N = np.shape(x1)[0]
K = np.ones(N) * amplitude**2
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
# Full covariance matrix asked
#x1 = inputs[0]
#x2 = inputs[1]
# Number of inputs x1
N1 = np.shape(x1)[0]
# x1 == x2?
if x1 is x2:
delta = True
# Delta covariance
#
# FIXME: Broadcasting doesn't work with sparse matrices,
# so must use scalar multiplication
K = gp_cov_delta(N1) * amplitude**2
#K = gp_cov_delta(N1).multiply(amplitude**2)
else:
delta = False
# Number of inputs x2
N2 = np.shape(x2)[0]
# Zero covariance
if N1 > 0 and N2 > 0:
K = sp.csc_matrix((N1,N2))
else:
K = np.zeros((N1,N2))
# Gradient w.r.t. amplitude
if gradient:
# FIXME: Broadcasting doesn't work with sparse matrices,
# so must use scalar multiplication
gradient_amplitude = K*(2/amplitude)
print("noise grad", gradient_amplitude)
return (K, (gradient_amplitude,))
else:
return K
def covfunc_pp2(amplitude, lengthscale, x1, x2, gradient=False):
# Make sure that hyperparameters are scalars, not an array objects
amplitude = utils.array_to_scalar(amplitude)
lengthscale = utils.array_to_scalar(lengthscale)
#amplitude = theta[0]
#lengthscale = theta[1]
## if gradient:
## gradient_amplitude = gradient[0]
## gradient_lengthscale = gradient[1]
## else:
## gradient_amplitude = []
## gradient_lengthscale = []
## inputs = gp_preprocess_inputs(*inputs)
# Compute covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
# Compute variance vector
K = np.ones(np.shape(x1)[:-1])
K *= amplitude**2
# Compute gradient w.r.t. lengthscale
if gradient:
gradient_lengthscale = np.zeros(np.shape(x1)[:-1])
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
# Compute (sparse) distance matrix
# NOTE: needs the sparse distance module imported as spdist
# (scikits.sparse.distance), whose import is commented out at the top.
if x1 is x2:
x1 = x1 / lengthscale
x2 = x1
D2 = spdist.pdist(x1, 1.0, form="full", format="csc")
else:
x1 = x1 / lengthscale
x2 = x2 / lengthscale
D2 = spdist.cdist(x1, x2, 1.0, format="csc")
r = np.sqrt(D2.data)
N1 = np.shape(x1)[0]
N2 = np.shape(x2)[0]
# Compute the covariances
if gradient:
(k, dk) = gp_cov_pp2(r, np.shape(x1)[-1], gradient=True)
else:
k = gp_cov_pp2(r, np.shape(x1)[-1])
k *= amplitude**2
# Compute gradient w.r.t. lengthscale
if gradient:
if N1 >= 1 and N2 >= 1:
dk *= r * (-amplitude**2 / lengthscale)
gradient_lengthscale = sp.csc_matrix((dk, D2.indices, D2.indptr),
shape=(N1,N2))
else:
gradient_lengthscale = np.empty((N1,N2))
# Form sparse covariance matrix
if N1 >= 1 and N2 >= 1:
## K = sp.csc_matrix((k, ij), shape=(N1,N2))
K = sp.csc_matrix((k, D2.indices, D2.indptr), shape=(N1,N2))
else:
K = np.empty((N1, N2))
#print(K.__class__)
# Gradient w.r.t. amplitude
if gradient:
gradient_amplitude = K * (2 / amplitude)
# Return values
if gradient:
print("pp2 grad", gradient_lengthscale)
return (K, (gradient_amplitude, gradient_lengthscale))
else:
return K
def covfunc_se(amplitude, lengthscale, x1, x2=None, gradient=False):
# Make sure that hyperparameters are scalars, not an array objects
amplitude = utils.array_to_scalar(amplitude)
lengthscale = utils.array_to_scalar(lengthscale)
# Compute covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
#x = inputs[0]
# Compute variance vector
N = np.shape(x1)[0]
K = np.ones(N)
np.multiply(K, amplitude**2, out=K)
# Compute gradient w.r.t. lengthscale
if gradient:
# TODO: Use sparse matrices?
gradient_lengthscale = np.zeros(N)
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
x1 = x1 / (lengthscale)
x2 = x2 / (lengthscale)
# Compute distance matrix
K = squared_distance(x1, x2)
# Compute gradient partly
if gradient:
gradient_lengthscale = np.divide(K, lengthscale)
# Compute covariance matrix
gp_cov_se(K, overwrite=True)
np.multiply(K, amplitude**2, out=K)
# Compute gradient w.r.t. lengthscale
if gradient:
gradient_lengthscale *= K
# Gradient w.r.t. amplitude
if gradient:
gradient_amplitude = K * (2 / amplitude)
# Return values
if gradient:
print("se grad", gradient_amplitude, gradient_lengthscale)
return (K, (gradient_amplitude, gradient_lengthscale))
else:
return K
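# Usage sketch (not part of the original file): evaluate the squared
# exponential covariance for scalar hyperparameters on toy 1-D inputs.
# covfunc_se standardises the inputs itself, so plain 1-D arrays are fine.
def _example_covfunc_se(amplitude=1.0, lengthscale=0.5):
    x = np.array([0.0, 0.5, 1.0, 2.0])
    K = covfunc_se(amplitude, lengthscale, x, x)   # full 4x4 covariance matrix
    v = covfunc_se(amplitude, lengthscale, x)      # variance vector only
    return K, v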
class CovarianceFunctionWrapper():
def __init__(self, covfunc, *params):
# Parse parameter values and their gradients to separate lists
self.covfunc = covfunc
self.params = list(params)
self.gradient_params = list()
## print(params)
for ind in range(len(params)):
if isinstance(params[ind], tuple):
# Parse the value and the list of gradients from the
# form:
# ([value, ...], [ [grad1, ...], [grad2, ...], ... ])
self.gradient_params.append(params[ind][1])
self.params[ind] = params[ind][0][0]
else:
# No gradients, parse from the form:
# [value, ...]
self.gradient_params.append([])
self.params[ind] = params[ind][0]
def fixed_covariance_function(self, *inputs, gradient=False):
# What if this is called several times??
if gradient:
## grads = [[grad[0] for grad in self.gradient_params[ind]]
## for ind in range(len(self.gradient_params))]
## (K, dK) = self.covfunc(self.params,
## *inputs,
## gradient=self.gradient_params)
arguments = tuple(self.params) + tuple(inputs)
(K, dK) = self.covfunc(*arguments,
gradient=True)
## (K, dK) = self.covfunc(self.params,
## *inputs,
## gradient=grads)
DK = []
for ind in range(len(dK)):
# Gradient w.r.t. covariance function's ind-th
# hyperparameter
dk = dK[ind]
# Chain rule: Multiply by the gradient of the
# hyperparameter w.r.t. parent node and append the
# list DK:
# DK = [ (dx1_1, callback), ..., (dx1_n, callback) ]
for grad in self.gradient_params[ind]:
#print(grad[0])
#print(grad[1:])
#print(dk)
if sp.issparse(dk):
print(dk.shape)
print(grad[0].shape)
DK += [ [dk.multiply(grad[0])] + grad[1:] ]
else:
DK += [ [np.multiply(dk,grad[0])] + grad[1:] ]
#DK += [ [np.multiply(grad[0], dk)] + grad[1:] ]
## DK += [ (np.multiply(grad, dk),) + grad[1:]
## for grad in self.gradient_params[ind] ]
## for grad in self.gradient_params[ind]:
## DK += ( (np.multiply(grad, dk),) + grad[1:] )
## DK = []
## for ind in range(len(dK)):
## for (grad, dk) in zip(self.gradient_params[ind], dK[ind]):
## DK += [ [dk] + grad[1:] ]
K = [K]
return (K, DK)
else:
arguments = tuple(self.params) + tuple(inputs)
#print(arguments)
K = self.covfunc(*arguments,
gradient=False)
return [K]
class CovarianceFunction(ef.Node):
def __init__(self, covfunc, *args, **kwargs):
self.covfunc = covfunc
params = list(args)
for i in range(len(args)):
# Check constant parameters
if utils.is_numeric(args[i]):
params[i] = ef.NodeConstant([np.asanyarray(args[i])],
dims=[np.shape(args[i])])
# TODO: Parameters could be constant functions? :)
ef.Node.__init__(self, *params, dims=[(np.inf, np.inf)], **kwargs)
def __call__(self, x1, x2):
""" Compute covariance matrix for inputs x1 and x2. """
covfunc = self.message_to_child()
return covfunc(x1, x2)[0]
def message_to_child(self, gradient=False):
params = [parent.message_to_child(gradient=gradient) for parent in self.parents]
covfunc = self.get_fixed_covariance_function(*params)
return covfunc
def get_fixed_covariance_function(self, *params):
get_cov_func = CovarianceFunctionWrapper(self.covfunc, *params)
return get_cov_func.fixed_covariance_function
## def covariance_function(self, *params):
## # Parse parameter values and their gradients to separate lists
## params = list(params)
## gradient_params = list()
## print(params)
## for ind in range(len(params)):
## if isinstance(params[ind], tuple):
## # Parse the value and the list of gradients from the
## # form:
## # ([value, ...], [ [grad1, ...], [grad2, ...], ... ])
## gradient_params.append(params[ind][1])
## params[ind] = params[ind][0][0]
## else:
## # No gradients, parse from the form:
## # [value, ...]
## gradient_params.append([])
## params[ind] = params[ind][0]
## # This gradient_params changes mysteriously..
## print('grad_params before')
## if isinstance(self, SquaredExponential):
## print(gradient_params)
## def cov(*inputs, gradient=False):
## if gradient:
## print('grad_params after')
## print(gradient_params)
## grads = [[grad[0] for grad in gradient_params[ind]]
## for ind in range(len(gradient_params))]
## print('CovarianceFunction.cov')
## #if isinstance(self, SquaredExponential):
## #print(self.__class__)
## #print(grads)
## (K, dK) = self.covfunc(params,
## *inputs,
## gradient=grads)
## for ind in range(len(dK)):
## for (grad, dk) in zip(gradient_params[ind], dK[ind]):
## grad[0] = dk
## K = [K]
## dK = []
## for grad in gradient_params:
## dK += grad
## return (K, dK)
## else:
## K = self.covfunc(params,
## *inputs,
## gradient=False)
## return [K]
## return cov
class Sum(CovarianceFunction):
def __init__(self, *args, **kwargs):
CovarianceFunction.__init__(self,
None,
*args,
**kwargs)
def get_fixed_covariance_function(self, *covfunc_parents):
def covfunc(*inputs, gradient=False):
K_sum = None
if gradient:
dK_sum = list()
for k in covfunc_parents:
if gradient:
(K, dK) = k(*inputs, gradient=gradient)
print("dK in sum", dK)
dK_sum += dK
#print("dK_sum in sum", dK_sum)
else:
K = k(*inputs, gradient=gradient)
if K_sum is None:
K_sum = K[0]
else:
try:
K_sum += K[0]
except:
# You have to do this way, for instance, if
# K_sum is sparse and K[0] is dense.
K_sum = K_sum + K[0]
if gradient:
#print("dK_sum on: ", dK_sum)
#print('covsum', dK_sum)
return ([K_sum], dK_sum)
else:
return [K_sum]
return covfunc
class Delta(CovarianceFunction):
def __init__(self, amplitude, **kwargs):
CovarianceFunction.__init__(self,
covfunc_delta,
amplitude,
**kwargs)
class Zeros(CovarianceFunction):
def __init__(self, **kwargs):
CovarianceFunction.__init__(self,
covfunc_zeros,
**kwargs)
class SquaredExponential(CovarianceFunction):
def __init__(self, amplitude, lengthscale, **kwargs):
CovarianceFunction.__init__(self,
covfunc_se,
amplitude,
lengthscale,
**kwargs)
class PiecewisePolynomial2(CovarianceFunction):
def __init__(self, amplitude, lengthscale, **kwargs):
CovarianceFunction.__init__(self,
covfunc_pp2,
amplitude,
lengthscale,
**kwargs)
# TODO: Rename to Blocks or Joint ?
class Multiple(CovarianceFunction):
def __init__(self, covfuncs, **kwargs):
self.d = len(covfuncs)
#self.sparse = sparse
parents = [covfunc for row in covfuncs for covfunc in row]
CovarianceFunction.__init__(self,
None,
*parents,
**kwargs)
def get_fixed_covariance_function(self, *covfuncs):
def cov(*inputs, gradient=False):
# Computes the covariance matrix from blocks which all
# have their corresponding covariance functions
if len(inputs) < 2:
# For one input, return the variance vector instead of
# the covariance matrix
x1 = inputs[0]
# Collect variance vectors from the covariance
# functions corresponding to the diagonal blocks
K = [covfuncs[i*self.d+i](x1[i], gradient=gradient)[0]
for i in range(self.d)]
# Form the variance vector from the collected vectors
if gradient:
raise Exception('Gradient not yet implemented.')
else:
## print("in cov multiple")
## for (k,kf) in zip(K,covfuncs):
## print(np.shape(k), k.__class__, kf)
#K = np.vstack(K)
K = np.concatenate(K)
else:
x1 = inputs[0]
x2 = inputs[1]
# Collect the covariance matrix (and possibly
# gradients) from each block.
#print('cov mat collection begins')
K = [[covfuncs[i*self.d+j](x1[i], x2[j], gradient=gradient)
for j in range(self.d)]
for i in range(self.d)]
#print('cov mat collection ends')
# Remove matrices that have zero length dimensions?
if gradient:
K = [[K[i][j]
for j in range(self.d)
if np.shape(K[i][j][0][0])[1] != 0]
for i in range(self.d)
if np.shape(K[i][0][0][0])[0] != 0]
else:
K = [[K[i][j]
for j in range(self.d)
if np.shape(K[i][j][0])[1] != 0]
for i in range(self.d)
if np.shape(K[i][0][0])[0] != 0]
n_blocks = len(K)
#print("nblocks", n_blocks)
#print("K", K)
# Check whether all blocks are sparse
is_sparse = True
for i in range(n_blocks):
for j in range(n_blocks):
if gradient:
A = K[i][j][0][0]
else:
A = K[i][j][0]
if not sp.issparse(A):
is_sparse = False
if gradient:
## Compute the covariance matrix and the gradients
# Create block matrices of zeros. This helps in
# computing the gradient.
if is_sparse:
# Empty sparse matrices. Some weird stuff here
# because sparse matrices can't have zero
# length dimensions.
Z = [[sp.csc_matrix(np.shape(K[i][j][0][0]))
for j in range(n_blocks)]
for i in range(n_blocks)]
else:
# Empty dense matrices
Z = [[np.zeros(np.shape(K[i][j][0][0]))
for j in range(n_blocks)]
for i in range(n_blocks)]
## for j in range(self.d)]
## for i in range(self.d)]
# Compute gradients block by block
dK = list()
for i in range(n_blocks):
for j in range(n_blocks):
# Store the zero block
z_old = Z[i][j]
# Go through the gradients for the (i,j)
# block
for dk in K[i][j][1]:
# Keep other blocks at zero and set
# the gradient to (i,j) block. Form
# the matrix from blocks
if is_sparse:
Z[i][j] = dk[0]
dk[0] = sp.bmat(Z).tocsc()
else:
if sp.issparse(dk[0]):
Z[i][j] = dk[0].toarray()
else:
Z[i][j] = dk[0]
#print("Z on:", Z)
dk[0] = np.asarray(np.bmat(Z))
# Append the computed gradient matrix
# to the list of gradients
dK.append(dk)
# Restore the zero block
Z[i][j] = z_old
## Compute the covariance matrix but not the
## gradients
if is_sparse:
# Form the full sparse covariance matrix from
# blocks. Ignore blocks having a zero-length
# axis because sparse matrices consider zero
# length as an invalid shape (BUG IN SCIPY?).
K = [[K[i][j][0][0]
for j in range(n_blocks)]
for i in range(n_blocks)]
K = sp.bmat(K).tocsc()
else:
# Form the full dense covariance matrix from
# blocks. Transform sparse blocks to dense
# blocks.
K = [[K[i][j][0][0]
if not sp.issparse(K[i][j][0][0]) else
K[i][j][0][0].toarray()
for j in range(n_blocks)]
for i in range(n_blocks)]
K = np.asarray(np.bmat(K))
else:
## Compute the covariance matrix but not the
## gradients
if is_sparse:
# Form the full sparse covariance matrix from
# blocks. Ignore blocks having a zero-length
# axis because sparse matrices consider zero
# length as an invalid shape (BUG IN SCIPY?).
K = [[K[i][j][0]
for j in range(n_blocks)]
for i in range(n_blocks)]
K = sp.bmat(K).tocsc()
else:
# Form the full dense covariance matrix from
# blocks. Transform sparse blocks to dense
# blocks.
K = [[K[i][j][0]
if not sp.issparse(K[i][j][0]) else
K[i][j][0].toarray()
for j in range(n_blocks)]
for i in range(n_blocks)]
K = np.asarray(np.bmat(K))
if gradient:
return ([K], dK)
else:
return [K]
return cov
| mit |
micahhausler/pandashells | pandashells/test/p_crypt_test.py | 9 | 2552 | #! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
from pandashells.bin.p_crypt import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out'.split())
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encrypt(self, isfile_mock, system_mock):
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
'cat my_in | openssl enc -aes-256-cbc -salt > my_out')
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out -v'.split())
@patch('pandashells.bin.p_crypt.sys.stdout')
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encrypt_verbose(
self, isfile_mock, system_mock, stdout_mock):
stdout_mock.write = MagicMock()
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
'cat my_in | openssl enc -aes-256-cbc -salt > my_out')
self.assertTrue(stdout_mock.write.called)
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out --password xx'.split())
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encypt_with_password(self, isfile_mock, system_mock):
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
"cat my_in | openssl enc -aes-256-cbc -salt -k 'xx' > my_out")
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out --password xx'.split())
@patch('pandashells.bin.p_crypt.sys.stderr')
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encypt_no_input_file(
self, isfile_mock, stderr_mock, system_mock):
isfile_mock.return_value = False
with self.assertRaises(SystemExit):
main()
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out -d'.split())
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_decrypt(self, isfile_mock, system_mock):
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
'cat my_in | openssl enc -d -aes-256-cbc > my_out')
| bsd-2-clause |
robcarver17/pysystemtrade | sysquant/optimisation/full_handcrafting.py | 1 | 47297 | # This is the *full* handcrafting code
# It can be used for long only
# It is *not* the code actually used in pysystemtrade
# It is completely self contained with no pysystemtrade imports
# CAVEATS:
# Uses weekly returns (resample needed first)
# Doesn't deal with missing assets
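#
# A minimal input sketch (an assumption based on the caveat above, not code
# from the original project): resample daily returns to weekly before building
# a Portfolio, e.g.
#
#   weekly_returns = daily_returns.resample("W").sum()  # exact for log returns
#   p = Portfolio(weekly_returns)
#   p.show_subportfolio_tree()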
from copy import copy
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy.stats import norm
import scipy.cluster.hierarchy as sch
FLAG_BAD_RETURN = -9999999.9
from scipy.optimize import minimize
from collections import namedtuple
CALENDAR_DAYS_IN_YEAR = 365.25
WEEKS_IN_YEAR = CALENDAR_DAYS_IN_YEAR / 7.0
MAX_CLUSTER_SIZE = 3 # Do not change
WARN_ON_SUBPORTFOLIO_SIZE = (
0.2 # change if you like, sensible values are between 0 and 0.5
)
APPROX_MIN_WEIGHT_IN_CORR_WEIGHTS = 0.1
FUDGE_FACTOR_FOR_CORR_WEIGHT_UNCERTAINTY = 4.0
MAX_ROWS_FOR_CORR_ESTIMATION = 100
PSTEP_FOR_CORR_ESTIMATION = 0.25
# Convenience objects
NO_SUB_PORTFOLIOS = object()
NO_RISK_TARGET = object()
NO_TOP_LEVEL_WEIGHTS = object()
class diagobject(object):
def __init__(self):
pass
def __repr__(self):
return "%s \n %s " % (self.calcs, self.description)
def norm_weights(list_of_weights):
norm_weights = list(np.array(list_of_weights) / np.sum(list_of_weights))
return norm_weights
# To make comparison easier we compare sorted correlations to sorted correlations; otherwise we'd need many more than 10
# candidate matrices to cope with different ordering of the same matrix
def get_weights_using_uncertainty_method(cmatrix, data_points=100):
if len(cmatrix) == 1:
return [1.0]
if len(cmatrix) == 2:
return [0.5, 0.5]
if len(cmatrix) > MAX_CLUSTER_SIZE:
raise Exception("Cluster too big")
average_weights = optimised_weights_given_correlation_uncertainty(cmatrix, data_points)
weights = apply_min_weight(average_weights)
return weights
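# Usage sketch (not in the original file): handcrafted weights for a three
# asset cluster straight from its correlation matrix, assuming roughly five
# years of weekly data. The two highly correlated assets end up sharing a
# smaller slice than the uncorrelated third asset.
def _example_uncertainty_weights():
    cmatrix = np.array([[1.0, 0.9, 0.0],
                        [0.9, 1.0, 0.0],
                        [0.0, 0.0, 1.0]])
    return get_weights_using_uncertainty_method(cmatrix, data_points=5 * 52)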
def optimised_weights_given_correlation_uncertainty(corr_matrix, data_points, p_step=PSTEP_FOR_CORR_ESTIMATION):
dist_points = np.arange(p_step, stop=(1-p_step)+0.000001, step=p_step)
list_of_weights = []
for conf1 in dist_points:
for conf2 in dist_points:
for conf3 in dist_points:
conf_intervals = labelledCorrelations(conf1, conf2, conf3)
weights = optimise_for_corr_matrix_with_uncertainty(corr_matrix, conf_intervals, data_points)
list_of_weights.append(weights)
array_of_weights = np.array(list_of_weights)
average_weights = np.nanmean(array_of_weights, axis=0)
return average_weights
labelledCorrelations = namedtuple("labelledCorrelations", 'ab ac bc')
def optimise_for_corr_matrix_with_uncertainty(corr_matrix, conf_intervals, data_points):
labelled_correlations = extract_asset_pairwise_correlations_from_matrix(corr_matrix)
labelled_correlation_points = calculate_correlation_points_from_tuples(labelled_correlations, conf_intervals, data_points)
corr_matrix_at_distribution_point = three_asset_corr_matrix(labelled_correlation_points)
weights = optimise_for_corr_matrix(corr_matrix_at_distribution_point)
return weights
def extract_asset_pairwise_correlations_from_matrix(corr_matrix):
ab = corr_matrix[0][1]
ac = corr_matrix[0][2]
bc = corr_matrix[1][2]
return labelledCorrelations(ab=ab, ac=ac, bc=bc)
def calculate_correlation_points_from_tuples(labelled_correlations, conf_intervals, data_points):
correlation_point_list = [get_correlation_distribution_point(corr_value, data_points, confidence_interval)
for corr_value, confidence_interval in
zip(labelled_correlations, conf_intervals)]
labelled_correlation_points = labelledCorrelations(*correlation_point_list)
return labelled_correlation_points
def get_correlation_distribution_point(corr_value, data_points, conf_interval):
fisher_corr = fisher_transform(corr_value)
point_in_fisher_units = \
get_fisher_confidence_point(fisher_corr, data_points, conf_interval)
point_in_natural_units = inverse_fisher(point_in_fisher_units)
return point_in_natural_units
def fisher_transform(corr_value):
if corr_value>=1.0:
corr_value = 0.99999999999999
elif corr_value<=-1.0:
corr_value = -0.99999999999999
return 0.5*np.log((1+corr_value) / (1-corr_value)) # also arctanh
def get_fisher_confidence_point(fisher_corr, data_points, conf_interval):
if conf_interval<0.5:
confidence_in_fisher_units = fisher_confidence(data_points, conf_interval)
point_in_fisher_units = fisher_corr - confidence_in_fisher_units
elif conf_interval>0.5:
confidence_in_fisher_units = fisher_confidence(data_points, 1-conf_interval)
point_in_fisher_units = fisher_corr + confidence_in_fisher_units
else:
point_in_fisher_units = fisher_corr
return point_in_fisher_units
def fisher_confidence(data_points, conf_interval):
data_point_root =fisher_stdev(data_points)*FUDGE_FACTOR_FOR_CORR_WEIGHT_UNCERTAINTY
conf_point = get_confidence_point(conf_interval)
return data_point_root * conf_point
def fisher_stdev(data_points):
data_point_root = 1/((data_points-3)**.5)
return data_point_root
def get_confidence_point(conf_interval):
conf_point = norm.ppf(1-(conf_interval/2))
return conf_point
def inverse_fisher(fisher_corr_value):
return (np.exp(2*fisher_corr_value) - 1) / (np.exp(2*fisher_corr_value) + 1)
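# Quick numeric illustration (not in the original file) of the machinery
# above: lower / central / upper points of the sampling distribution for a
# measured correlation of 0.5 over 100 data points, plus the Fisher round trip.
def _example_correlation_distribution():
    lower = get_correlation_distribution_point(0.5, 100, 0.25)
    centre = get_correlation_distribution_point(0.5, 100, 0.5)   # = 0.5
    upper = get_correlation_distribution_point(0.5, 100, 0.75)
    round_trip = inverse_fisher(fisher_transform(0.5))           # = 0.5
    return lower, centre, upper, round_trip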
def three_asset_corr_matrix(labelled_correlations):
"""
:return: np.array, 2 dimensions (3 x 3)
"""
ab = labelled_correlations.ab
ac = labelled_correlations.ac
bc = labelled_correlations.bc
m = [[1.0, ab, ac], [ab, 1.0, bc], [ac, bc, 1.0]]
m = np.array(m)
return m
def optimise_for_corr_matrix(corr_matrix):
## arbitrary
mean_list = [.05]*3
std = .1
stdev_list = np.full(len(mean_list), std)
sigma = sigma_from_corr_and_std(stdev_list, corr_matrix)
return optimise(sigma, mean_list)
def apply_min_weight(average_weights):
weights_with_min = [min_weight(weight) for weight in average_weights]
adj_weights = norm_weights(weights_with_min)
return adj_weights
def min_weight(weight):
if weight<APPROX_MIN_WEIGHT_IN_CORR_WEIGHTS:
return APPROX_MIN_WEIGHT_IN_CORR_WEIGHTS
else:
return weight
"""
SR adjustment
"""
def multiplier_from_relative_SR(relative_SR, avg_correlation, years_of_data):
# Return a multiplier
# 1 implies no adjustment required
ratio = mini_bootstrap_ratio_given_SR_diff(
relative_SR, avg_correlation, years_of_data
)
return ratio
def mini_bootstrap_ratio_given_SR_diff(
SR_diff,
avg_correlation,
years_of_data,
avg_SR=0.5,
std=0.15,
how_many_assets=2,
p_step=0.01,
):
"""
Do a parametric bootstrap of portfolio weights to tell you what the ratio should be between an asset which
has a higher backtested SR (by SR_diff) versus another asset(s) with average Sharpe Ratio (avg_SR)
All assets are assumed to have same standard deviation and correlation
:param SR_diff: Difference in performance in Sharpe Ratio (SR) units between one asset and the rest
:param avg_correlation: Average correlation across portfolio
:param years_of_data: How many years of data do you have (can be float for partial years)
:param avg_SR: Should be realistic for your type of trading
:param std: Standard deviation (doesn't affect results, just a scaling parameter)
:param how_many_assets: How many assets in the imaginary portfolio
:param p_step: Step size to go through in the CDF of the mean estimate
:return: float, ratio of weight of asset with different SR to 1/n weight
"""
dist_points = np.arange(
p_step,
stop=(
1 -
p_step) +
0.00000001,
step=p_step)
list_of_weights = [
weights_given_SR_diff(
SR_diff,
avg_correlation,
confidence_interval,
years_of_data,
avg_SR=avg_SR,
std=std,
how_many_assets=how_many_assets,
)
for confidence_interval in dist_points
]
array_of_weights = np.array(list_of_weights)
average_weights = np.nanmean(array_of_weights, axis=0)
ratio_of_weights = weight_ratio(average_weights)
if np.sign(ratio_of_weights - 1.0) != np.sign(SR_diff):
# This shouldn't happen, and only occurs because weight distributions
# get curtailed at zero
return 1.0
return ratio_of_weights
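# Usage sketch (not from the original file): how much extra weight an asset
# earns for backtesting 0.2 SR units above its peers, with average pairwise
# correlation 0.5 and ten years of data. A ratio of 1.0 would mean no
# adjustment; here the ratio should come out somewhat above 1.
def _example_SR_adjustment_ratio():
    return mini_bootstrap_ratio_given_SR_diff(
        SR_diff=0.2, avg_correlation=0.5, years_of_data=10)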
def weight_ratio(weights):
"""
Return the ratio of weight of first asset to other weights
:param weights:
:return: float
"""
one_over_N_weight = 1.0 / len(weights)
weight_first_asset = weights[0]
return weight_first_asset / one_over_N_weight
def weights_given_SR_diff(
SR_diff,
avg_correlation,
confidence_interval,
years_of_data,
avg_SR=0.5,
std=0.15,
how_many_assets=2,
):
"""
Return the ratio of weight to 1/N weight for an asset with unusual SR
:param SR_diff: Difference between the SR and the average SR. 0.0 indicates same as average
:param avg_correlation: Average correlation amongst assets
:param years_of_data: How long has this been going one
:param avg_SR: Average SR to use for other asset
:param confidence_interval: How confident are we about our mean estimate (i.e. cdf point)
:param how_many_assets: .... are we optimising over (I only consider 2, but let's keep it general)
:param std: Standard deviation to use
:return: Ratio of weight, where 1.0 means no difference
"""
average_mean = avg_SR * std
asset1_mean = (SR_diff + avg_SR) * std
mean_difference = asset1_mean - average_mean
# Work out what the mean is with appropriate confidence
confident_mean_difference = calculate_confident_mean_difference(
std, years_of_data, mean_difference, confidence_interval, avg_correlation)
confident_asset1_mean = confident_mean_difference + average_mean
mean_list = [confident_asset1_mean] + \
[average_mean] * (how_many_assets - 1)
weights = optimise_using_correlation(mean_list, avg_correlation, std)
return list(weights)
def optimise_using_correlation(mean_list, avg_correlation, std):
corr_matrix = boring_corr_matrix(len(mean_list), offdiag=avg_correlation)
stdev_list = np.full(len(mean_list), std)
sigma = sigma_from_corr_and_std(stdev_list, corr_matrix)
return optimise(sigma, mean_list)
def boring_corr_matrix(size, offdiag=0.99, diag=1.0):
"""
Create a boring correlation matrix
:param size: dimensions
:param offdiag: value to put in off diagonal
:param diag: value to put in diagonal
:return: np.array, 2 dimensions (size x size)
"""
size_index = range(size)
def _od(i, j, offdiag, diag):
if i == j:
return diag
else:
return offdiag
m = [[_od(i, j, offdiag, diag) for i in size_index] for j in size_index]
m = np.array(m)
return m
def calculate_confident_mean_difference(
std, years_of_data, mean_difference, confidence_interval, avg_correlation
):
omega_difference = calculate_omega_difference(
std, years_of_data, avg_correlation)
confident_mean_difference = stats.norm(
mean_difference, omega_difference).ppf(confidence_interval)
return confident_mean_difference
def calculate_omega_difference(std, years_of_data, avg_correlation):
omega_one_asset = std / (years_of_data) ** 0.5
omega_variance_difference = 2 * \
(omega_one_asset ** 2) * (1 - avg_correlation)
omega_difference = omega_variance_difference ** 0.5
return omega_difference
def adjust_weights_for_SR(weights, SR_list, years_of_data, avg_correlation):
"""
Adjust weights according to heuristic method
:param weights: List of float, starting weights
:param SR_list: np.array of Sharpe Ratios
:param years_of_data: float
:return: list of adjusted weights
"""
assert len(weights) == len(SR_list)
avg_SR = np.nanmean(SR_list)
relative_SR_list = SR_list - avg_SR
multipliers = [
float(multiplier_from_relative_SR(relative_SR, avg_correlation, years_of_data))
for relative_SR in relative_SR_list
]
new_weights = list(np.array(weights) * np.array(multipliers))
norm_new_weights = norm_weights(new_weights)
return norm_new_weights
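# Usage sketch (not in the original file): tilt equal volatility weights
# towards the asset with the best backtested Sharpe Ratio. SR_list is expected
# to be a numpy array (the average SR is subtracted from it elementwise above).
def _example_adjust_weights_for_SR():
    weights = [1 / 3, 1 / 3, 1 / 3]
    SR_list = np.array([0.3, 0.5, 0.7])
    return adjust_weights_for_SR(weights, SR_list,
                                 years_of_data=10, avg_correlation=0.5)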
class Portfolio:
"""
Portfolios; what do they contain: a list of instruments, return characteristics, [vol weights], [cash weights]
can contain sub portfolios
they are initially created with some returns
"""
def __init__(
self,
instrument_returns,
allow_leverage=False,
risk_target=NO_RISK_TARGET,
use_SR_estimates=True,
top_level_weights=NO_TOP_LEVEL_WEIGHTS,
log=print,
):
"""
:param instrument_returns: A pandas data frame labelled with instrument names, containing weekly instrument_returns
:param allow_leverage: bool. Ignored if NO_RISK_TARGET
:param risk_target: (optionally) float, annual standard deviation estimate
:param use_SR_estimates: bool
:param top_level_weights: (optionally) pass a list, same length as top level. Used for partioning to hit risk target.
"""
instrument_returns = self._clean_instruments_remove_missing(
instrument_returns)
self.instrument_returns = instrument_returns
self.instruments = list(instrument_returns.columns)
self.corr_matrix = calc_correlation(instrument_returns)
self.vol_vector = np.array(
instrument_returns.std() * (WEEKS_IN_YEAR ** 0.5))
self.returns_vector = np.array(
instrument_returns.mean() * WEEKS_IN_YEAR)
self.sharpe_ratio = self.returns_vector / self.vol_vector
self.years_of_data = minimum_many_years_of_data_in_dataframe(
instrument_returns)
self.allow_leverage = allow_leverage
self.risk_target = risk_target
self.use_SR_estimates = use_SR_estimates
self.top_level_weights = top_level_weights
self.log = log
def __repr__(self):
return "Portfolio with %d instruments" % len(self.instruments)
def _missing_data_instruments(self, instrument_returns, min_periods=2):
"""
This will only affect top level portfolios
:return: list of instruments without enough data for correlation estimate
"""
instrument_returns[instrument_returns == 0.0] = np.nan
missing_values = instrument_returns.isna().sum()
total_data_length = len(instrument_returns)
missing_instruments = [
instrument for instrument,
missing_value_this_instrument in zip(
instrument_returns.columns,
missing_values) if (
total_data_length -
missing_value_this_instrument) < min_periods]
return missing_instruments
def _clean_instruments_remove_missing(self, instrument_returns):
"""
:return: pd.DataFrame with only valid instruments left in
"""
all_instruments = instrument_returns.columns
missing_instruments = self._missing_data_instruments(
instrument_returns)
valid_instruments = [
x for x in all_instruments if x not in missing_instruments]
self.all_instruments = all_instruments
self.missing_instruments = missing_instruments
self.valid_instruments = valid_instruments
return instrument_returns[valid_instruments]
def _cluster_breakdown(self) -> list:
"""
Creates clusters from the portfolio (doesn't create sub portfolios, but tells you which ones to make)
Credit to this notebook: https://github.com/TheLoneNut/CorrelationMatrixClustering/blob/master/CorrelationMatrixClustering.ipynb
:return: list of int same length as instruments
"""
corr_matrix = self.corr_matrix.values
ind = cluster_correlation_matrix(corr_matrix, max_cluster_size=MAX_CLUSTER_SIZE)
return ind
def _cluster_breakdown_using_risk_partition(self):
"""
Creates clusters, using a risk partitioning method
:return: list of int, same length as instruments
"""
risk_target = self.risk_target
self.log(
"Partioning into two groups to hit risk target of %f" %
risk_target)
assert risk_target is not NO_RISK_TARGET
vol_vector = self.vol_vector
count_is_higher_risk = sum(
[instrument_vol > risk_target for instrument_vol in vol_vector]
)
if count_is_higher_risk == 0:
raise Exception(
"Risk target greater than vol of any instrument: will be impossible to hit risk target"
)
if count_is_higher_risk < (
len(self.instruments) * WARN_ON_SUBPORTFOLIO_SIZE):
self.log(
"Not many instruments have risk higher than target; portfolio will be concentrated to hit risk target"
)
def _cluster_id(instrument_vol, risk_target):
# hard coded do not change; high vol is second group
if instrument_vol > risk_target:
return 2
else:
return 1
cluster_list = [
_cluster_id(
instrument_vol,
risk_target) for instrument_vol in vol_vector]
return cluster_list
def _create_single_subportfolio(self, instrument_list):
"""
Create a single sub portfolio object
:param instrument_list: a subset of the instruments in self.instruments
:return: a new Portfolio object
"""
sub_portfolio_returns = self.instrument_returns[instrument_list]
# IMPORTANT NOTE: Sub portfolios don't inherit risk targets or
# leverage... that is only applied at top level
sub_portfolio = Portfolio(
sub_portfolio_returns, use_SR_estimates=self.use_SR_estimates
)
return sub_portfolio
def _create_child_subportfolios(self):
"""
Create sub portfolios. This doesn't create the entire 'tree', just the level below us (our children)
:return: a list of new portfolio objects (also modifies self.sub_portfolios)
"""
# get clusters
if len(self.instruments) <= MAX_CLUSTER_SIZE:
return NO_SUB_PORTFOLIOS
if self._require_partioned_portfolio():
# Break into two groups to hit a risk target
self.log("Applying partition to hit risk target")
cluster_list = self._cluster_breakdown_using_risk_partition()
else:
self.log("Natural top level grouping used")
cluster_list = self._cluster_breakdown()
unique_clusters = list(set(cluster_list))
instruments_by_cluster = [
[
self.instruments[idx]
for idx, i in enumerate(cluster_list)
if i == cluster_id
]
for cluster_id in unique_clusters
]
sub_portfolios = [
self._create_single_subportfolio(instruments_for_this_cluster)
for instruments_for_this_cluster in instruments_by_cluster
]
return sub_portfolios
def _require_partioned_portfolio(self):
"""
If risk_target set and no leverage allowed will be True,
OR if top level weights are passed
otherwise False
:return: bool
"""
if self.top_level_weights is not NO_TOP_LEVEL_WEIGHTS:
# if top level weights are passed we need to partition
return True
elif (self.risk_target is not NO_RISK_TARGET) and (not self.allow_leverage):
# if a risk target is set, but also no leverage allowed, we need to
# partition
return True
return False
def _create_all_subportfolios(self):
"""
Decluster the entire portfolio into a tree of subportfolios
:return: None [populates self.subportfolios] or NO_SUB_PORTFOLIOS
"""
# Create the first level of sub portfolios underneath us
sub_portfolios = self._create_child_subportfolios()
if sub_portfolios is NO_SUB_PORTFOLIOS:
# nothing to do
return NO_SUB_PORTFOLIOS
# Create the rest of the tree
for single_sub_portfolio in sub_portfolios:
# This will create all nested portfolios
single_sub_portfolio._create_all_subportfolios()
return sub_portfolios
def show_subportfolio_tree(self, prefix=""):
"""
Display the sub portfolio tree
:return: None
"""
descrlist = []
if self.sub_portfolios is NO_SUB_PORTFOLIOS:
descrlist = ["%s Contains %s" % (prefix, str(self.instruments))]
return descrlist
descrlist.append("%s Contains %d sub portfolios" %
(prefix, len(self.sub_portfolios)))
for idx, sub_portfolio in enumerate(self.sub_portfolios):
descrlist.append(
sub_portfolio.show_subportfolio_tree(
prefix="%s[%d]" %
(prefix, idx)))
return descrlist
def _diags_as_dataframe(self):
"""
:return: A list of tuples (label, dataframes) showing how the portfolio weights were built up
"""
diag = diagobject()
# not used - make sure everything is available
vw = self.volatility_weights
if self.sub_portfolios is NO_SUB_PORTFOLIOS:
description = "Portfolio containing %s instruments " % (
str(self.instruments)
)
diag.description = description
vol_weights = self.volatility_weights
raw_weights = self.raw_weights
SR = self.sharpe_ratio
diagmatrix = pd.DataFrame(
[raw_weights, vol_weights, list(SR)],
columns=self.instruments,
index=["Raw vol (no SR adj)", "Vol (with SR adj)", "Sharpe Ratio"],
)
diag.calcs = diagmatrix
diag.cash = "No cash calculated"
diag.aggregate = "Not an aggregate portfolio"
return diag
description = "Portfolio containing %d sub portfolios" % len(
self.sub_portfolios
)
diag.description = description
# do instrument level
dm_by_instrument_list = self.dm_by_instrument_list
instrument_vol_weight_in_sub_list = self.instrument_vol_weight_in_sub_list
sub_portfolio_vol_weight_list = self.sub_portfolio_vol_weight_list
vol_weights = self.volatility_weights
diagmatrix = pd.DataFrame(
[
instrument_vol_weight_in_sub_list,
sub_portfolio_vol_weight_list,
dm_by_instrument_list,
vol_weights,
],
columns=self.instruments,
index=[
"Vol wt in group",
"Vol wt. of group",
"Div mult of group",
"Vol wt.",
],
)
diag.calcs = diagmatrix
# do aggregate next
diag.aggregate = diagobject()
diag.aggregate.description = description + " aggregate"
vol_weights = self.aggregate_portfolio.volatility_weights
raw_weights = self.aggregate_portfolio.raw_weights
div_mult = [
sub_portfolio.div_mult for sub_portfolio in self.sub_portfolios]
sharpe_ratios = list(self.aggregate_portfolio.sharpe_ratio)
# unlabelled, sub portfolios don't get names
diagmatrix = pd.DataFrame(
[raw_weights, vol_weights, sharpe_ratios, div_mult],
index=[
"Raw vol (no SR adj or DM)",
"Vol (with SR adj no DM)",
"SR",
"Div mult",
],
)
diag.aggregate.calcs = diagmatrix
# do cash
diag.cash = diagobject()
description = "Portfolio containing %d instruments (cash calculations)" % len(
self.instruments)
diag.cash.description = description
vol_weights = self.volatility_weights
cash_weights = self.cash_weights
vol_vector = list(self.vol_vector)
diagmatrix = pd.DataFrame(
[vol_weights, vol_vector, cash_weights],
columns=self.instruments,
index=["Vol weights", "Std.", "Cash weights"],
)
diag.cash.calcs = diagmatrix
return diag
def _calculate_weights_standalone_portfolio(self):
"""
For a standalone portfolio, calculates volatility weights
        Uses the correlation uncertainty method (get_weights_using_uncertainty_method)
:return: list of weights
"""
assert len(self.instruments) <= MAX_CLUSTER_SIZE
assert self.sub_portfolios is NO_SUB_PORTFOLIOS
raw_weights = get_weights_using_uncertainty_method(
self.corr_matrix.values, len(self.instrument_returns.index))
self.raw_weights = raw_weights
use_SR_estimates = self.use_SR_estimates
if use_SR_estimates:
SR_list = self.sharpe_ratio
years_of_data = self.years_of_data
avg_correlation = get_avg_corr(self.corr_matrix.values)
adjusted_weights = adjust_weights_for_SR(
raw_weights, SR_list, years_of_data, avg_correlation
)
else:
adjusted_weights = raw_weights
return adjusted_weights
def _calculate_portfolio_returns(self):
"""
If we have some weights, calculate the returns of the entire portfolio
Needs cash weights
:return: pd.Series of returns
"""
cash_weights = self.cash_weights
instrument_returns = self.instrument_returns
cash_weights_as_df = pd.DataFrame(
[cash_weights] * len(instrument_returns.index), instrument_returns.index
)
cash_weights_as_df.columns = instrument_returns.columns
portfolio_returns_df = cash_weights_as_df * instrument_returns
portfolio_returns = portfolio_returns_df.sum(axis=1)
return portfolio_returns
def _calculate_portfolio_returns_std(self):
return self.portfolio_returns.std() * (WEEKS_IN_YEAR ** 0.5)
def _calculate_diversification_mult(self):
"""
Calculates the diversification multiplier for a portfolio
:return: float
"""
corr_matrix = self.corr_matrix.values
vol_weights = np.array(self.volatility_weights)
div_mult = 1.0 / (
(np.dot(np.dot(vol_weights, corr_matrix), vol_weights.transpose())) ** 0.5
)
return div_mult
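    # Worked sketch (hypothetical numbers): for two assets with vol weights [0.5, 0.5]
    # and pairwise correlation 0.5,
    #
    #   w = np.array([0.5, 0.5])
    #   corr = np.array([[1.0, 0.5], [0.5, 1.0]])
    #   1.0 / (np.dot(np.dot(w, corr), w.transpose()) ** 0.5)   # ~1.1547
    #
    # i.e. the diversified portfolio can be scaled up by roughly 15%.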
def _calculate_sub_portfolio_returns(self):
"""
Return a matrix of returns with sub portfolios each representing a single asset
:return: pd.DataFrame
"""
assert self.sub_portfolios is not NO_SUB_PORTFOLIOS
sub_portfolio_returns = [
sub_portfolio.portfolio_returns for sub_portfolio in self.sub_portfolios]
sub_portfolio_returns = pd.concat(sub_portfolio_returns, axis=1)
return sub_portfolio_returns
def _calculate_weights_aggregated_portfolio(self):
"""
Calculate weights when we have sub portfolios
This is done by pulling in the weights from each sub portfolio, giving weights to each sub portfolio, and then getting the product
:return: list of weights
"""
sub_portfolio_returns = self._calculate_sub_portfolio_returns()
# create another Portfolio object made up of the sub portfolios
aggregate_portfolio = Portfolio(
sub_portfolio_returns, use_SR_estimates=self.use_SR_estimates
)
# store to look at later if you want
self.aggregate_portfolio = aggregate_portfolio
# calculate the weights- these will be the weight on each sub portfolio
if self.top_level_weights is NO_TOP_LEVEL_WEIGHTS:
# calculate the weights in the normal way
aggregate_weights = aggregate_portfolio.volatility_weights
raw_weights = aggregate_portfolio.raw_weights
else:
# override with top_level_weights - used when risk targeting
try:
assert len(self.top_level_weights) == len(
aggregate_portfolio.instruments
)
except BaseException:
                raise Exception(
                    "Top level weights length %d is different from number of top level groups %d" %
                    (len(self.top_level_weights),
                     len(self.aggregate_portfolio.instruments)))
aggregate_weights = self.top_level_weights
raw_weights = aggregate_weights
# calculate the product of div_mult, aggregate weights and sub
# portfolio weights, return as list
vol_weights = []
dm_by_instrument_list = []
instrument_vol_weight_in_sub_list = []
sub_portfolio_vol_weight_list = []
for instrument_code in self.instruments:
weight = None
for sub_portfolio, sub_weight in zip(
self.sub_portfolios, aggregate_weights
):
if instrument_code in sub_portfolio.instruments:
if weight is not None:
raise Exception(
"Instrument %s in multiple sub portfolios" %
instrument_code)
# A weight is the product of: the diversification multiplier for the subportfolio it comes from,
# the weight of that instrument within that subportfolio, and
# the weight of the subportfolio within the larger
# portfolio
div_mult = sub_portfolio.div_mult
instrument_idx = sub_portfolio.instruments.index(
instrument_code)
instrument_weight = sub_portfolio.volatility_weights[instrument_idx]
weight = div_mult * instrument_weight * sub_weight
# useful diagnostics
dm_by_instrument_list.append(div_mult)
instrument_vol_weight_in_sub_list.append(instrument_weight)
sub_portfolio_vol_weight_list.append(sub_weight)
if weight is None:
raise Exception(
"Instrument %s missing from all sub portfolios" %
instrument_code)
vol_weights.append(weight)
vol_weights = norm_weights(vol_weights)
# store diags
self.dm_by_instrument_list = dm_by_instrument_list
self.instrument_vol_weight_in_sub_list = instrument_vol_weight_in_sub_list
self.sub_portfolio_vol_weight_list = sub_portfolio_vol_weight_list
self.raw_weights = raw_weights
return vol_weights
def _calculate_volatility_weights(self):
"""
Calculates the volatility weights of the portfolio
        If the portfolio contains sub_portfolios, it will calculate the volatility weights of each sub_portfolio,
        then the weights of the sub_portfolios themselves, and finally multiply these out.
        If the portfolio does not contain sub_portfolios, it just calculates the weights directly.
:return: volatility weights, also sets self.volatility_weights
"""
if self.sub_portfolios is NO_SUB_PORTFOLIOS:
vol_weights = self._calculate_weights_standalone_portfolio()
else:
vol_weights = self._calculate_weights_aggregated_portfolio()
return vol_weights
def _calculate_cash_weights_no_risk_target(self):
"""
Calculate cash weights without worrying about risk targets
:return: list of cash weights
"""
vol_weights = self.volatility_weights
instrument_std = self.vol_vector
raw_cash_weights = [
vweight / vol for vweight, vol in zip(vol_weights, instrument_std)
]
raw_cash_weights = norm_weights(raw_cash_weights)
return raw_cash_weights
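    # Worked sketch (hypothetical numbers): vol weights [0.5, 0.5] on instruments with
    # standard deviations [0.10, 0.20] give raw cash weights [5.0, 2.5], which normalise
    # to roughly [0.667, 0.333] - the lower-vol instrument receives more capital.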
def _calculate_cash_weights_with_risk_target_partitioned(self):
"""
Readjust partitioned top level groups to hit a risk target
(https://qoppac.blogspot.com/2018/12/portfolio-construction-through_7.html)
:return: list of weights
"""
assert self._require_partioned_portfolio()
assert len(self.sub_portfolios) == 2
# hard coded - high vol is second group. Don't change!
high_vol_sub_portfolio = self.sub_portfolios[1]
low_vol_sub_portfolio = self.sub_portfolios[0]
high_vol_std = high_vol_sub_portfolio.portfolio_std
low_vol_std = low_vol_sub_portfolio.portfolio_std
risk_target_std = self.risk_target
assert high_vol_std > low_vol_std
# Now for the correlation estimate
# first create another Portfolio object made up of the sub portfolios
sub_portfolio_returns = self._calculate_sub_portfolio_returns()
assert (
len(sub_portfolio_returns.columns) == 2
        ) # should be guaranteed by partitioning but just to check
correlation = sub_portfolio_returns.corr().values[0][
1
] # only works for groups of 2
# formula from
# https://qoppac.blogspot.com/2018/12/portfolio-construction-through_7.html
a_value = (
(high_vol_std ** 2)
+ (low_vol_std ** 2)
- (2 * high_vol_std * low_vol_std * correlation)
)
b_value = (2 * high_vol_std * low_vol_std * correlation) - 2 * (
low_vol_std ** 2
)
c_value = (low_vol_std ** 2) - (risk_target_std ** 2)
# standard formula for solving a quadratic
high_cash_weight = (
-b_value + (((b_value ** 2) - (4 * a_value * c_value)) ** 0.5)
) / (2 * a_value)
try:
assert high_cash_weight >= 0.0
except BaseException:
raise Exception(
"Something went wrong; cash weight target on high risk portfolio is negative!"
)
try:
assert high_cash_weight <= 1.0
except BaseException:
raise Exception(
"Can't hit risk target of %f - make it lower or include riskier assets!" %
risk_target_std)
# new_weight is the weight on the HIGH_VOL portfolio
low_cash_weight = 1.0 - high_cash_weight
# These are cash weights; change to a vol weight
high_vol_weight = high_cash_weight * high_vol_std
low_vol_weight = low_cash_weight * low_vol_std
self.log(
"Need to limit low cash group to %f (vol) %f (cash) of portfolio to hit risk target of %f" %
(low_vol_weight, low_cash_weight, risk_target_std))
# Hard coded - high vol is second group
top_level_weights = norm_weights([low_vol_weight, high_vol_weight])
#p.top_level_weights = top_level_weights
# We create an adjusted portfolio with the required top level weights as constraints
# we also need to pass the risk target to get same partitioning
# and use_SR_estimates to guarantee weights are the same
#
adjusted_portfolio = Portfolio(
self.instrument_returns,
use_SR_estimates=self.use_SR_estimates,
top_level_weights=top_level_weights,
risk_target=self.risk_target,
)
return adjusted_portfolio.cash_weights
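    # Worked sketch (hypothetical numbers): with low_vol_std=0.05, high_vol_std=0.20,
    # correlation=0.0 and risk_target_std=0.10, the quadratic above gives
    # a_value=0.0425, b_value=-0.005, c_value=-0.0075, so
    # high_cash_weight = (0.005 + 0.0013 ** 0.5) / (2 * 0.0425) ~= 0.483
    # and the low vol group gets the remaining ~0.517 of capital, which hits the 10% target.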
def _calculate_cash_weights_with_risk_target(self):
"""
Calculate cash weights given a risk target
:return: list of weights
"""
target_std = self.risk_target
self.log("Calculating weights to hit a risk target of %f" % target_std)
# create version without risk target to check natural risk
# note all sub portfolios are like this
natural_portfolio = Portfolio(
self.instrument_returns, risk_target=NO_RISK_TARGET
)
natural_std = natural_portfolio.portfolio_std
natural_cash_weights = natural_portfolio.cash_weights
# store for diagnostics
self.natural_cash_weights = natural_cash_weights
self.natural_std = natural_std
if natural_std > target_std:
# Too much risk
# blend with cash
cash_required = (natural_std - target_std) / natural_std
portfolio_capital_left = 1.0 - cash_required
self.log(
"Too much risk %f of the portfolio will be cash" %
cash_required)
cash_weights = list(
np.array(natural_cash_weights) *
portfolio_capital_left)
# stored as diag
self.cash_required = cash_required
return cash_weights
elif natural_std < target_std:
# Not enough risk
if self.allow_leverage:
# calc leverage
leverage = target_std / natural_std
self.log(
"Not enough risk leverage factor of %f applied" %
leverage)
cash_weights = list(np.array(natural_cash_weights) * leverage)
# stored as diag
self.leverage = leverage
return cash_weights
else:
# no leverage allowed
# need to adjust weights
self.log(
"Not enough risk, no leverage allowed, using partition method")
return self._calculate_cash_weights_with_risk_target_partitioned()
# will only get here if the target and natural std are identical...
# unlikely - but!
return natural_cash_weights
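    # Worked sketch (hypothetical numbers): a natural std of 12% against a 10% target
    # gives cash_required = (0.12 - 0.10) / 0.12 ~= 0.167, so each natural cash weight is
    # scaled by ~0.833; a natural std of 8% with leverage allowed gives a leverage factor
    # of 0.10 / 0.08 = 1.25 applied to the natural cash weights.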
def _calculate_cash_weights(self):
"""
Calculate cash weights
Note - this will apply a risk target if required
Note 2 - only top level portfolios have risk targets - sub portfolios don't
:return: list of weights
"""
target_std = self.risk_target
if target_std is NO_RISK_TARGET:
# no risk target, can use natural weights
return self._calculate_cash_weights_no_risk_target()
elif self.top_level_weights is not NO_TOP_LEVEL_WEIGHTS:
# top level weights passed, use natural weights
return self._calculate_cash_weights_no_risk_target()
else:
# need a risk target
return self._calculate_cash_weights_with_risk_target()
"""
Functions to return including missing data
"""
def _weights_with_missing_data(self, original_weights):
"""
        :param original_weights: weights calculated for the valid (non-missing) instruments only
:return: weights adding back original instruments
"""
original_weights_valid_only = dict(
[
(instrument, weight)
for instrument, weight in zip(self.valid_instruments, original_weights)
]
)
new_weights = []
for instrument in self.all_instruments:
if instrument in self.missing_instruments:
new_weights.append(np.nan)
elif instrument in self.valid_instruments:
new_weights.append(original_weights_valid_only[instrument])
else:
raise Exception("Gone horribly wrong")
return new_weights
def volatility_weights_with_missing_data(self):
"""
:return: vol weights, adding back any missing instruments
"""
vol_weights_valid_only = self.volatility_weights
vol_weights = self._weights_with_missing_data(vol_weights_valid_only)
return vol_weights
def cash_weights_with_missing_data(self):
"""
:return: cash weights, adding back any missing instruments
"""
cash_weights_valid_only = self.cash_weights
cash_weights = self._weights_with_missing_data(cash_weights_valid_only)
return cash_weights
"""
Boilerplate getter functions
"""
@property
def volatility_weights(self):
if hasattr(self, "_volatility_weights"):
return self._volatility_weights
else:
weights_vol = self._calculate_volatility_weights()
self._volatility_weights = weights_vol
return weights_vol
@property
def cash_weights(self):
if hasattr(self, "_cash_weights"):
return self._cash_weights
else:
weights_cash = self._calculate_cash_weights()
self._cash_weights = weights_cash
return weights_cash
@property
def sub_portfolios(self):
if hasattr(self, "_sub_portfolios"):
return self._sub_portfolios
else:
sub_portfolios = self._create_all_subportfolios()
self._sub_portfolios = sub_portfolios
return sub_portfolios
@property
def portfolio_returns(self):
if hasattr(self, "_portfolio_returns"):
return self._portfolio_returns
else:
portfolio_returns = self._calculate_portfolio_returns()
self._portfolio_returns = portfolio_returns
return portfolio_returns
@property
def portfolio_std(self):
if hasattr(self, "_portfolio_returns_std"):
return self._portfolio_returns_std
else:
portfolio_returns_std = self._calculate_portfolio_returns_std()
self._portfolio_returns_std = portfolio_returns_std
return portfolio_returns_std
@property
def div_mult(self):
if hasattr(self, "_div_mult"):
return self._div_mult
else:
div_mult = self._calculate_diversification_mult()
self._div_mult = div_mult
return div_mult
@property
def diags(self):
if hasattr(self, "_diags"):
return self._diags
else:
diags = self._diags_as_dataframe()
self._diags = diags
return diags
def calc_correlation(instrument_returns):
recent_instrument_returns = instrument_returns[-MAX_ROWS_FOR_CORR_ESTIMATION:]
corr = recent_instrument_returns.corr()
return corr
def minimum_many_years_of_data_in_dataframe(data):
years_of_data_dict = how_many_years_of_data_in_dataframe(data)
years_of_data_values = years_of_data_dict.values()
min_years_of_data = min(years_of_data_values)
return min_years_of_data
def how_many_years_of_data_in_dataframe(data):
"""
How many years of non NA data do we have?
Assumes daily timestamp
:param data: pd.DataFrame with labelled columns
    :return: dict of floats, keyed by column label
"""
result_dict = dict(data.apply(how_many_years_of_data_in_pd_series, axis=0))
return result_dict
from syscore.pdutils import pd_readcsv
def how_many_years_of_data_in_pd_series(data_series):
"""
    How many years of actual data do we have?
    Assumes a daily timestamp which is fairly regular
:param data_series:
:return: float
"""
first_valid_date = data_series.first_valid_index()
last_valid_date = data_series.last_valid_index()
date_difference = last_valid_date - first_valid_date
date_difference_days = date_difference.days
date_difference_years = float(date_difference_days) / CALENDAR_DAYS_IN_YEAR
return date_difference_years
def get_avg_corr(sigma):
"""
>>> sigma=np.array([[1.0,0.0,0.5], [0.0, 1.0, 0.75],[0.5, 0.75, 1.0]])
>>> get_avg_corr(sigma)
0.41666666666666669
>>> sigma=np.array([[1.0,np.nan], [np.nan, 1.0]])
>>> get_avg_corr(sigma)
nan
"""
new_sigma = copy(sigma)
np.fill_diagonal(new_sigma, np.nan)
if np.all(np.isnan(new_sigma)):
return np.nan
avg_corr = np.nanmean(new_sigma)
return avg_corr
def cluster_correlation_matrix(corr_matrix: np.array, max_cluster_size = 3) -> list:
d = sch.distance.pdist(corr_matrix)
L = sch.linkage(d, method="complete")
ind = sch.fcluster(L, max_cluster_size, criterion="maxclust")
ind = list(ind)
return ind
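# Illustrative usage sketch (hypothetical data, not part of the original code): with two
# highly correlated assets and one diversifier,
#
#   corr = np.array([[1.0, 0.9, 0.1],
#                    [0.9, 1.0, 0.1],
#                    [0.1, 0.1, 1.0]])
#   cluster_correlation_matrix(corr, max_cluster_size=2)
#   # e.g. [1, 1, 2]: the first two assets share a cluster (exact label values are
#   # whatever scipy's fcluster assigns).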
def optimise(sigma, mean_list):
# will replace nans with big negatives
mean_list = fix_mus(mean_list)
# replaces nans with zeros
sigma = fix_sigma(sigma)
mus = np.array(mean_list, ndmin=2).transpose()
number_assets = sigma.shape[1]
start_weights = [1.0 / number_assets] * number_assets
# Constraints - positive weights, adding to 1.0
bounds = [(0.0, 1.0)] * number_assets
cdict = [{"type": "eq", "fun": addem}]
ans = minimize(
neg_SR,
start_weights,
(sigma, mus),
method="SLSQP",
bounds=bounds,
constraints=cdict,
tol=0.00001,
)
# anything that had a nan will now have a zero weight
weights = ans["x"]
# put back the nans
weights = un_fix_weights(mean_list, weights)
return weights
def sigma_from_corr_and_std(stdev_list, corrmatrix):
sigma = np.diag(stdev_list).dot(corrmatrix).dot(np.diag(stdev_list))
return sigma
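# Illustrative usage sketch (hypothetical data): two uncorrelated assets with identical
# vol and identical mean should come out roughly equally weighted,
#
#   sigma = sigma_from_corr_and_std([0.1, 0.1], np.array([[1.0, 0.0], [0.0, 1.0]]))
#   optimise(sigma, [0.05, 0.05])   # -> approximately [0.5, 0.5]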
def fix_mus(mean_list):
"""
Replace nans with unfeasibly large negatives
result will be zero weights for these assets
"""
def _fixit(x):
if np.isnan(x):
return FLAG_BAD_RETURN
else:
return x
mean_list = [_fixit(x) for x in mean_list]
return mean_list
def un_fix_weights(mean_list, weights):
"""
When mean has been replaced, use nan weight
"""
def _unfixit(xmean, xweight):
if xmean == FLAG_BAD_RETURN:
return np.nan
else:
return xweight
fixed_weights = [
_unfixit(
xmean,
xweight) for (
xmean,
xweight) in zip(
mean_list,
weights)]
return fixed_weights
def fix_sigma(sigma):
"""
Replace nans with zeros
"""
def _fixit(x):
if np.isnan(x):
return 0.0
else:
return x
sigma = [[_fixit(x) for x in sigma_row] for sigma_row in sigma]
sigma = np.array(sigma)
return sigma
def neg_SR(weights, sigma, mus):
# Returns minus the Sharpe Ratio (as we're minimising)
weights = np.matrix(weights)
estreturn = (weights * mus)[0, 0]
std_dev = variance(weights, sigma) ** 0.5
return -estreturn / std_dev
def addem(weights):
# Used for constraints
return 1.0 - sum(weights)
def variance(weights, sigma):
# returns the variance (NOT standard deviation) given weights and sigma
return (weights * sigma * weights.transpose())[0, 0]
| gpl-3.0 |
midnightradio/gensim | gensim/sklearn_api/tfidf.py | 3 | 6995 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit-learn interface for :class:`~gensim.models.tfidfmodel.TfidfModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.sklearn_api import TfIdfTransformer
>>>
>>> # Transform the word counts inversely to their global frequency using the sklearn interface.
>>> model = TfIdfTransformer(dictionary=common_dictionary)
>>> tfidf_corpus = model.fit_transform(common_corpus)
"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim.models import TfidfModel
import gensim
class TfIdfTransformer(TransformerMixin, BaseEstimator):
"""Base TfIdf module, wraps :class:`~gensim.models.tfidfmodel.TfidfModel`.
For more information see `tf-idf <https://en.wikipedia.org/wiki/Tf%E2%80%93idf>`_.
"""
def __init__(self, id2word=None, dictionary=None, wlocal=gensim.utils.identity,
wglobal=gensim.models.tfidfmodel.df2idf, normalize=True, smartirs="nfc",
pivot=None, slope=0.65):
"""
Parameters
----------
id2word : {dict, :class:`~gensim.corpora.Dictionary`}, optional
Mapping from int id to word token, that was used for converting input data to bag of words format.
dictionary : :class:`~gensim.corpora.Dictionary`, optional
If specified it will be used to directly construct the inverse document frequency mapping.
        wlocal : function, optional
Function for local weighting, default for `wlocal` is :func:`~gensim.utils.identity` which does nothing.
Other options include :func:`math.sqrt`, :func:`math.log1p`, etc.
wglobal : function, optional
Function for global weighting, default is :func:`~gensim.models.tfidfmodel.df2idf`.
normalize : bool, optional
It dictates how the final transformed vectors will be normalized. `normalize=True` means set to unit length
(default); `False` means don't normalize. You can also set `normalize` to your own function that accepts
and returns a sparse vector.
smartirs : str, optional
SMART (System for the Mechanical Analysis and Retrieval of Text) Information Retrieval System,
a mnemonic scheme for denoting tf-idf weighting variants in the vector space model.
The mnemonic for representing a combination of weights takes the form XYZ,
for example 'ntc', 'bpn' and so on, where the letters represents the term weighting of the document vector.
local_letter : str
Term frequency weighing, one of:
* `b` - binary,
* `t` or `n` - raw,
* `a` - augmented,
* `l` - logarithm,
* `d` - double logarithm,
* `L` - log average.
global_letter : str
Document frequency weighting, one of:
* `x` or `n` - none,
* `f` - idf,
* `t` - zero-corrected idf,
* `p` - probabilistic idf.
normalization_letter : str
Document normalization, one of:
* `x` or `n` - none,
* `c` - cosine,
* `u` - pivoted unique,
* `b` - pivoted character length.
Default is `nfc`.
For more info, visit `"Wikipedia" <https://en.wikipedia.org/wiki/SMART_Information_Retrieval_System>`_.
pivot : float, optional
It is the point around which the regular normalization curve is `tilted` to get the new pivoted
normalization curve. In the paper `Amit Singhal, Chris Buckley, Mandar Mitra:
"Pivoted Document Length Normalization" <http://singhal.info/pivoted-dln.pdf>`_ it is the point where the
retrieval and relevance curves intersect.
This parameter along with `slope` is used for pivoted document length normalization.
When `pivot` is None, `smartirs` specifies the pivoted unique document normalization scheme, and either
            `corpus` or `dictionary` is specified, then the pivot will be determined automatically. Otherwise, no
pivoted document length normalization is applied.
slope : float, optional
It is the parameter required by pivoted document length normalization which determines the slope to which
the `old normalization` can be tilted. This parameter only works when pivot is defined by user and is not
None.
See Also
--------
~gensim.models.tfidfmodel.TfidfModel : Class that also uses the SMART scheme.
~gensim.models.tfidfmodel.resolve_weights : Function that also uses the SMART scheme.
"""
self.gensim_model = None
self.id2word = id2word
self.dictionary = dictionary
self.wlocal = wlocal
self.wglobal = wglobal
self.normalize = normalize
self.smartirs = smartirs
self.slope = slope
self.pivot = pivot
def fit(self, X, y=None):
"""Fit the model from the given training data.
Parameters
----------
X : iterable of iterable of (int, int)
Input corpus
y : None
Ignored. TF-IDF is an unsupervised model.
Returns
-------
:class:`~gensim.sklearn_api.tfidf.TfIdfTransformer`
The trained model.
"""
self.gensim_model = TfidfModel(
corpus=X, id2word=self.id2word, dictionary=self.dictionary, wlocal=self.wlocal,
wglobal=self.wglobal, normalize=self.normalize, smartirs=self.smartirs,
pivot=self.pivot, slope=self.slope,
)
return self
def transform(self, docs):
"""Get the tf-idf scores for `docs` in a bag-of-words representation.
Parameters
----------
docs: {iterable of list of (int, number)}
Document or corpus in bag-of-words format.
Returns
-------
        iterable of list (int, float) 2-tuples.
            The TF-IDF weighted representation of each input document.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# Is the input a single document?
if isinstance(docs[0], tuple):
docs = [docs] # Yes => convert it to a corpus (of 1 document).
return [self.gensim_model[doc] for doc in docs]
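    # Illustrative usage sketch (assumed, not from the original module): a single document
    # in bag-of-words format is also accepted thanks to the tuple check above, e.g.
    #
    #   model = TfIdfTransformer(dictionary=common_dictionary).fit(common_corpus)
    #   model.transform(common_corpus[0])   # list of (id, tf-idf weight) pairs for one doc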
| gpl-3.0 |
LucianU/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
shaunwbell/FOCI_Analysis | ReanalysisRetreival_orig/GOA_Winds/GOA_Winds_NARR_model_prep.py | 1 | 10003 | #!/usr/bin/env
"""
GOA_Winds_NARR_model_prep.py
Retrieve NARR winds for two locations:
GorePoint - 58deg 58min N, 150deg 56min W
and Globec3 59.273701N, 148.9653W
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import general_utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = '[email protected]'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','GLOBEC3', 'Gorept','3hr filtered', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
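# Worked sketch (arbitrary example date): the fractional day becomes milliseconds since
# 0000 GMT, so for a python serial date ending in .5 (noon),
#
#   time1, time2 = pydate2EPIC(733042.5)
#   # time2 == 43200000.0 (12 hours in ms); time1 is the EPIC/julian day number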
"---"
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
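# Worked sketch: the (1/4, 1/2, 1/4) filter spreads a single spike over its neighbours,
#
#   triangle_smoothing(np.array([0., 0., 4., 0., 0.]))
#   # -> array([0., 1., 2., 1., 0.])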
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Users/bell/Data_Local/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.2003.nc']
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
station_name = ['Globec3','GorePt']
sta_lat = [59.273701,58.9666666666666667]
sta_long = [148.9653,150.9333333333333333]
#Find NARR nearest point to moorings - haversine formula
# NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI
globec_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
gorept_pt = sphered.nearest_point([sta_lat[1],-1 * sta_long[1]],lat_lon['lat'],lat_lon['lon'], '2d')
globec_modelpt = [lat_lon['lat'][globec_pt[3],globec_pt[4]],lat_lon['lon'][globec_pt[3],globec_pt[4]]]
gorept_modelpt = [lat_lon['lat'][gorept_pt[3],gorept_pt[4]],lat_lon['lon'][gorept_pt[3],gorept_pt[4]]]
print "Globec nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], globec_modelpt[0], globec_modelpt[1])
print "GorePt nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[1], sta_long[1], gorept_modelpt[0], gorept_modelpt[1])
#loop over all requested data
#years = arange(1984,2014,1)
#years = [1984, 1987, 1989, 1991, 1994, 2001, 2002, 2003, 2004, 2005, 2006, 2011, 2013]
years = [1986,]
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3u_f = triangle_smoothing(globec3_data['uwnd'])
goreptu_f = triangle_smoothing(gorept_data['uwnd'])
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3v_f = triangle_smoothing(globec3_data['vwnd'])
goreptv_f = triangle_smoothing(gorept_data['vwnd'])
#convert to EPIC time
pydate = date2pydate(globec3_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_globec_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
        write2epic( outfile, station_name[0], [epic_time, epic_time1], globec_modelpt, [globec3u_f, globec3v_f])
outfile = 'data/NARR_gorept_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
        write2epic( outfile, station_name[1], [epic_time, epic_time1], gorept_modelpt, [goreptu_f, goreptv_f])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=62,llcrnrlon=-155,urcrnrlon=-145, lat_ts=45)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0], -1. * sta_long[1]],sta_lat)
x_close, y_close = m([globec_modelpt[1],gorept_modelpt[1]], [globec_modelpt[0],gorept_modelpt[0]])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
    CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyles='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,62,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-155,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/Gorepoint_region.png', bbox_inches='tight', dpi = (100))
plt.close()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=62,llcrnrlon=-155,urcrnrlon=-145, lat_ts=45)
| mit |
ch3ll0v3k/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
paris-saclay-cds/ramp-workflow | rampwf/tests/kits/mars_craters/problem.py | 1 | 3667 | import os
import numpy as np
import pandas as pd
import rampwf as rw
problem_title = 'Mars craters detection and classification'
# A type (class) which will be used to create wrapper objects for y_pred
Predictions = rw.prediction_types.make_detection()
# An object implementing the workflow
workflow = rw.workflows.ObjectDetector()
# The overlap between adjacent patches is 56 pixels
# The scoring region is chosen so that despite the overlap,
# no crater is scored twice, hence the boundaries of
# 28 = 56 / 2 and 196 = 224 - 56 / 2
minipatch = [28, 196, 28, 196]
score_types = [
rw.score_types.OSPA(minipatch=minipatch),
rw.score_types.SCP(shape=(224, 224), minipatch=minipatch),
rw.score_types.DetectionAveragePrecision(name='ap'),
rw.score_types.DetectionPrecision(
name='prec(0)', iou_threshold=0.0, minipatch=minipatch),
rw.score_types.DetectionPrecision(
name='prec(0.5)', iou_threshold=0.5, minipatch=minipatch),
rw.score_types.DetectionPrecision(
name='prec(0.9)', iou_threshold=0.9, minipatch=minipatch),
rw.score_types.DetectionRecall(
name='rec(0)', iou_threshold=0.0, minipatch=minipatch),
rw.score_types.DetectionRecall(
name='rec(0.5)', iou_threshold=0.5, minipatch=minipatch),
rw.score_types.DetectionRecall(
name='rec(0.9)', iou_threshold=0.9, minipatch=minipatch),
rw.score_types.MADCenter(name='madc', minipatch=minipatch),
rw.score_types.MADRadius(name='madr', minipatch=minipatch)
]
def get_cv(X, y):
# 3 quadrangles for training have not exactly the same size,
# but for simplicity just cut in 3
# for each fold use one quadrangle as test set, the other two as training
n_tot = len(X)
n1 = n_tot // 3
n2 = n1 * 2
return [(np.r_[0:n2], np.r_[n2:n_tot]),
(np.r_[n1:n_tot], np.r_[0:n1]),
(np.r_[0:n1, n2:n_tot], np.r_[n1:n2])]
def _read_data(path, typ):
"""
Read and process data and labels.
Parameters
----------
path : path to directory that has 'data' subdir
typ : {'train', 'test'}
Returns
-------
X, y data
"""
suffix = '_mini'
try:
data_path = os.path.join(path, 'data',
'data_{0}{1}.npy'.format(typ, suffix))
src = np.load(data_path, mmap_mode='r')
labels_path = os.path.join(path, 'data',
'labels_{0}{1}.csv'.format(typ, suffix))
labels = pd.read_csv(labels_path)
except IOError:
raise IOError("'data/data_{0}.npy' and 'data/labels_{0}.csv' are not "
"found. Ensure you ran 'python download_data.py' to "
"obtain the train/test data".format(typ))
# convert the dataframe with crater positions to list of
# list of (x, y, radius) tuples (list of arrays of shape (n, 3) with n
# true craters on an image
# determine locations of craters for each patch in the labels array
n_true_patches = labels.groupby('i').size().reindex(
range(src.shape[0]), fill_value=0).values
# make cumulative sum to obtain start/stop to slice the labels
n_cum = np.array(n_true_patches).cumsum()
n_cum = np.insert(n_cum, 0, 0)
labels_array = labels[['row_p', 'col_p', 'radius_p']].values
y = [[tuple(x) for x in labels_array[i:j]]
for i, j in zip(n_cum[:-1], n_cum[1:])]
# convert list to object array of lists
y_array = np.empty(len(y), dtype=object)
y_array[:] = y
return src, y_array
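# Illustrative sketch (hypothetical label values): if the labels table holds two craters
# for patch 0 and none for patch 1, e.g. rows (i=0, row_p=10, col_p=20, radius_p=5) and
# (i=0, row_p=50, col_p=60, radius_p=8), then
#
#   y_array[0] == [(10, 20, 5), (50, 60, 8)]
#   y_array[1] == []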
def get_test_data(path='.'):
return _read_data(path, 'test')
def get_train_data(path='.'):
return _read_data(path, 'train')
| bsd-3-clause |
errantlinguist/tangrams-analysis | tangrams_analysis/cross_validation.py | 1 | 5008 | """
Functionalities for cross-validating words-as-classifiers reference resolution (not yet finished!).
"""
__author__ = "Todd Shore <[email protected]>"
__copyright__ = "Copyright 2017 Todd Shore"
__license__ = "Apache License, Version 2.0"
import csv
import itertools
from collections import namedtuple
from typing import Callable, Iterable, Iterator, Mapping, Optional, Tuple
import pandas as pd
from . import game_utterances
from . import iristk
from . import session_data as sd
CATEGORICAL_VAR_COL_NAMES = (
game_utterances.EventColumn.ENTITY_SHAPE.value, game_utterances.EventColumn.EVENT_SUBMITTER.value)
# NOTE: For some reason, "pandas.get_dummies(..., columns=[col_name_1,...])" works with list objects but not with tuples
CATEGORICAL_DEPENDENT_VAR_COL_NAMES = [game_utterances.EventColumn.ENTITY_SHAPE.value]
assert all(col_name in CATEGORICAL_VAR_COL_NAMES for col_name in CATEGORICAL_DEPENDENT_VAR_COL_NAMES)
RESULTS_FILE_ENCODING = "utf-8"
__RESULTS_FILE_DTYPES = {"Cleaning.DISFLUENCIES": bool, "Cleaning.DUPLICATES": bool, "Cleaning.FILLERS": bool}
CrossValidationDataFrames = namedtuple("CrossValidationDataFrames", ("training", "testing"))
class CachingSessionDataFrameFactory(object):
def __init__(self, session_data_frame_factory: Optional[Callable[[sd.SessionData], pd.DataFrame]] = None):
self.session_data_frame_factory = game_utterances.SessionGameRoundUtteranceSequenceFactory() if session_data_frame_factory is None else session_data_frame_factory
self.cache = {}
def __call__(self, infile: str, session: sd.SessionData) -> pd.DataFrame:
try:
result = self.cache[infile]
except KeyError:
result = self.session_data_frame_factory(session)
result[game_utterances.EventColumn.DYAD_ID.value] = infile
self.cache[infile] = result
return result
class CrossValidationData(object):
def __init__(self, testing_data: Tuple[str, sd.SessionData], training_data: Mapping[str, sd.SessionData]):
self.testing_data = testing_data
self.training_data = training_data
@property
def __key(self):
return self.testing_data, self.training_data
def __eq__(self, other):
return (self is other or (isinstance(other, type(self))
and self.__key == other.__key))
def __hash__(self):
return hash(self.__key)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return self.__class__.__name__ + str(self.__dict__)
class CrossValidationDataFrameFactory(object):
@staticmethod
def __categoricize_data(training_feature_df: pd.DataFrame, testing_feature_df: pd.DataFrame):
for col_name in CATEGORICAL_VAR_COL_NAMES:
unique_values = tuple(sorted(frozenset(
itertools.chain(training_feature_df[col_name].unique(), testing_feature_df[col_name].unique()))))
training_feature_df[col_name] = pd.Categorical(training_feature_df[col_name], categories=unique_values,
ordered=False)
testing_feature_df[col_name] = pd.Categorical(testing_feature_df[col_name], categories=unique_values,
ordered=False)
def __init__(self, session_data_frame_factory: Optional[Callable[[str, sd.SessionData], pd.DataFrame]]):
self.session_data_frame_factory = CachingSessionDataFrameFactory() if session_data_frame_factory is None else session_data_frame_factory
	def __call__(self, named_session_data: Iterable[Tuple[str, sd.SessionData]]) -> Iterator[CrossValidationDataFrames]:
for testing_session_name, testing_session_data in named_session_data:
training_sessions = dict(
(infile, training_session_data) for (infile, training_session_data) in named_session_data if
testing_session_data != training_session_data)
cross_validation_set = CrossValidationData((testing_session_name, testing_session_data),
training_sessions)
yield self.__create_cross_validation_data_frames(cross_validation_set)
def __create_cross_validation_data_frames(self,
cross_validation_data: CrossValidationData) -> CrossValidationDataFrames:
training_feature_df = pd.concat(self.session_data_frame_factory(infile, session) for (infile, session) in
cross_validation_data.training_data.items())
testing_feature_df = self.session_data_frame_factory(*cross_validation_data.testing_data)
# noinspection PyTypeChecker
self.__categoricize_data(training_feature_df, testing_feature_df)
dummified_training_feature_df = pd.get_dummies(training_feature_df, columns=CATEGORICAL_DEPENDENT_VAR_COL_NAMES)
dummified_testing_feature_df = pd.get_dummies(testing_feature_df, columns=CATEGORICAL_DEPENDENT_VAR_COL_NAMES)
return CrossValidationDataFrames(dummified_training_feature_df, dummified_testing_feature_df)
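# Illustrative sketch (assumed example, not part of the original module): declaring the
# full category set before get_dummies keeps the train and test feature matrices aligned
# even when a value is absent from one of them, e.g.
#
#   shapes = pd.Categorical(['circle'], categories=('circle', 'square'))
#   pd.get_dummies(pd.DataFrame({'SHAPE': shapes}), columns=['SHAPE'])
#   # -> both SHAPE_circle and SHAPE_square dummy columns are produced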
def read_results_file(inpath: str) -> pd.DataFrame:
return pd.read_csv(inpath, sep=csv.excel_tab.delimiter, dialect=csv.excel_tab, float_precision="round_trip",
encoding=RESULTS_FILE_ENCODING, memory_map=True, parse_dates=["TIME", "EVENT_TIME"],
date_parser=iristk.parse_timestamp,
dtype=__RESULTS_FILE_DTYPES)
| apache-2.0 |
wkfwkf/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
editted by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause |
justthetips/PerformanceAnalytics | performanceanalytics/drawdowns.py | 1 | 6741 | # MIT License
# Copyright (c) 2017 Jacob Bourne
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pandas as pd
def find_drawdowns(series: pd.Series):
"""
find the drawdowns of a series, returns a list of drawdown holder objects
:param series: the series
:return: list of drawdown holders
"""
if not isinstance(series, pd.Series):
raise ValueError("Only works for Pandas Series, you passed in {}".format(type(series)))
# first turn the series into the cumprod
dd_series = (1 + series).cumprod()
# now walk through the time series finding the dd
prior_max = dd_series.iloc[0]
prior_min = prior_max
in_drawdown = False
current_dd = None
dd_list = []
for dt, value in dd_series.iteritems():
# if the value is lower than the previous we are in a drawdown
if value < prior_max:
# if we are not already in a drawdown we are now
if not in_drawdown:
in_drawdown = True
dd = DrawdownHolder(dt)
dd.max_value = prior_max
dd.min_value = value
dd.trough_date = dt
prior_min = value
current_dd = dd
elif value < prior_min:
# if we are in a drawdown, check to see if we are at the min
current_dd.min_value = value
current_dd.trough_date = dt
prior_min = value
else:
if in_drawdown:
# the drawdown is over
current_dd.end_date = dt
prior_max = value
in_drawdown = False
dd_list.append(current_dd)
else:
prior_max = value
return dd_list
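# Illustrative usage sketch (hypothetical returns):
#
#   rets = pd.Series([0.10, -0.05, -0.05, 0.12],
#                    index=pd.date_range('2017-01-31', periods=4, freq='M'))
#   dds = find_drawdowns(rets)
#   dds[0].depth   # 0.95 * 0.95 - 1 = -0.0975 relative to the prior peak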
class DrawdownHolder(object):
"""
Custom class to hold all the information about a drawdown
"""
def __init__(self, dd_start):
"""
initialization, must pass in the start date
:param dd_start:
"""
self._dd_start = dd_start
@property
def start_date(self):
"""
the start date
:return: the start date of the drawdown
"""
return self._dd_start
@property
def trough_date(self):
"""
the date of the trough of the drawdown
:return: the date
"""
return self._trough_date
@trough_date.setter
def trough_date(self, td):
"""
set the trough date
:param td: the date
:return:
"""
self._trough_date = td
@property
def end_date(self):
"""
the end date of the drawdown
:return: the date
"""
return self._end_date
@end_date.setter
def end_date(self, ed):
"""
the end date of the drawdown
:param ed: the date
:return:
"""
self._end_date = ed
@property
def max_value(self):
"""
the max value before the drawdown began
:return: the value
"""
return self._max_value
@max_value.setter
def max_value(self, mv):
"""
the max value before the drawdown began
:param mv: the value
:return:
"""
self._max_value = mv
@property
def min_value(self):
"""
the min value of the drawdown
:return: the value
"""
return self._min_value
@min_value.setter
def min_value(self, mv):
"""
the min value of the drawdown
:param mv: the value
:return:
"""
self._min_value = mv
@property
def depth(self):
"""
the depth of the drawdown (min / max) - 1
:return: the depth
"""
if (self.min_value is None) or (self.max_value is None):
raise AttributeError("Cannot be called until min value and max value are set")
return (self.min_value / self.max_value) - 1
@property
def length(self):
"""
the length of the drawdown in days
:return: the length
"""
if self.end_date is None:
raise AttributeError("Cannot be called until the end date is set")
return (self.end_date - self.start_date).days
@property
def recovery(self):
"""
the length of the recovery in days
:return: the length
"""
if (self.trough_date is None) or (self.end_date is None):
raise AttributeError("Cannot be called until trough date and end date are set")
return (self.end_date - self.trough_date).days
@property
def to_trough(self):
"""
the length from the start to the trough in days
:return: the length
"""
if self.trough_date is None:
raise AttributeError("Cannot be called until trough date is set")
return (self.trough_date - self.start_date).days
def __repr__(self):
return '{}: {} {} {}'.format(self.__class__.__name__,
self.start_date,
self.end_date, self.depth)
def __lt__(self, other):
return self.depth < other.depth
def __le__(self, other):
return self.depth <= other.depth
def __gt__(self, other):
return self.depth > other.depth
def __ge__(self, other):
return self.depth >= other.depth
def __eq__(self, other):
return self.start_date == other.start_date and self.trough_date == other.trough_date and self.end_date == other.end_date
def __ne__(self, other):
return self.start_date != other.start_date or self.trough_date != other.trough_date or self.end_date != other.end_date
| mit |
bartosh/zipline | zipline/testing/predicates.py | 1 | 15559 | from contextlib import contextmanager
import datetime
from functools import partial
import inspect
import re
from nose.tools import ( # noqa
assert_almost_equal,
assert_almost_equals,
assert_dict_contains_subset,
assert_false,
assert_greater,
assert_greater_equal,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_less,
assert_less_equal,
assert_multi_line_equal,
assert_not_almost_equal,
assert_not_almost_equals,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_raises_regexp,
assert_regexp_matches,
assert_true,
assert_tuple_equal,
)
import numpy as np
import pandas as pd
from pandas.util.testing import (
assert_frame_equal,
assert_panel_equal,
assert_series_equal,
assert_index_equal,
)
from six import iteritems, viewkeys, PY2
from toolz import dissoc, keyfilter
import toolz.curried.operator as op
from zipline.testing.core import ensure_doctest
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.lib.labelarray import LabelArray
from zipline.utils.functional import dzip_exact, instance
from zipline.utils.math_utils import tolerant_equals
@instance
@ensure_doctest
class wildcard(object):
"""An object that compares equal to any other object.
This is useful when using :func:`~zipline.testing.predicates.assert_equal`
with a large recursive structure and some fields to be ignored.
Examples
--------
>>> wildcard == 5
True
>>> wildcard == 'ayy'
True
# reflected
>>> 5 == wildcard
True
>>> 'ayy' == wildcard
True
"""
@staticmethod
def __eq__(other):
return True
@staticmethod
def __ne__(other):
return False
def __repr__(self):
return '<%s>' % type(self).__name__
__str__ = __repr__
def keywords(func):
"""Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
Notes
-----
Taken from odo.utils
"""
if isinstance(func, type):
return keywords(func.__init__)
elif isinstance(func, partial):
return keywords(func.func)
return inspect.getargspec(func).args
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
Notes
-----
Taken from odo.utils
"""
return keyfilter(op.contains(keywords(f)), kwargs)
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
"""
return word + (suffix if len(seq) != 1 else '')
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ''
return 'path: _' + ''.join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
    Returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ''
return msg + '\n'
def _safe_cls_name(cls):
try:
return cls.__name__
except AttributeError:
return repr(cls)
def assert_is_subclass(subcls, cls, msg=''):
"""Assert that ``subcls`` is a subclass of ``cls``.
Parameters
----------
subcls : type
The type to check.
cls : type
The type to check ``subcls`` against.
msg : str, optional
An extra assertion message to print if this fails.
"""
assert issubclass(subcls, cls), (
'%s is not a subclass of %s\n%s' % (
_safe_cls_name(subcls),
_safe_cls_name(cls),
msg,
)
)
def assert_regex(result, expected, msg=''):
"""Assert that ``expected`` matches the result.
Parameters
----------
result : str
The string to search.
expected : str or compiled regex
The pattern to search for in ``result``.
msg : str, optional
An extra assertion message to print if this fails.
"""
assert re.search(expected, result), (
'%s%r not found in %r' % (_fmt_msg(msg), expected, result)
)
@contextmanager
def assert_raises_regex(exc, pattern, msg=''):
"""Assert that some exception is raised in a context and that the message
matches some pattern.
Parameters
----------
exc : type or tuple[type]
The exception type or types to expect.
pattern : str or compiled regex
The pattern to search for in the str of the raised exception.
msg : str, optional
An extra assertion message to print if this fails.
"""
try:
yield
except exc as e:
assert re.search(pattern, str(e)), (
'%s%r not found in %r' % (_fmt_msg(msg), pattern, str(e))
)
else:
raise AssertionError('%s%s was not raised' % (_fmt_msg(msg), exc))
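# Editor's illustrative sketch (not part of the original zipline source): a
# minimal example of how ``assert_raises_regex`` might be used in a test. The
# exception type and message below are hypothetical.
def _example_assert_raises_regex():  # pragma: no cover
    with assert_raises_regex(ValueError, r'bad value: \d+'):
        raise ValueError('bad value: 42')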
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg='', **kwargs):
"""Assert that two objects are equal using the ``==`` operator.
Parameters
----------
result : object
The result that came from the function under test.
expected : object
The expected result.
Raises
------
AssertionError
Raised when ``result`` is not equal to ``expected``.
"""
assert result == expected, '%s%s != %s\n%s' % (
_fmt_msg(msg),
result,
expected,
_fmt_path(path),
)
@assert_equal.register(float, float)
def assert_float_equal(result,
expected,
path=(),
msg='',
float_rtol=10e-7,
float_atol=10e-7,
float_equal_nan=True,
**kwargs):
assert tolerant_equals(
result,
expected,
rtol=float_rtol,
atol=float_atol,
equal_nan=float_equal_nan,
), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % (
_fmt_msg(msg),
result,
expected,
float_rtol,
float_atol,
(' (with nan != nan)' if not float_equal_nan else ''),
_fmt_path(path),
)
def _check_sets(result, expected, msg, path, type_):
"""Compare two sets. This is used to check dictionary keys and sets.
Parameters
----------
result : set
expected : set
msg : str
path : tuple
type : str
The type of an element. For dict we use ``'key'`` and for set we use
``'element'``.
"""
if result != expected:
if result > expected:
diff = result - expected
msg = 'extra %s in result: %r' % (_s(type_, diff), diff)
elif result < expected:
diff = expected - result
msg = 'result is missing %s: %r' % (_s(type_, diff), diff)
else:
in_result = result - expected
in_expected = expected - result
msg = '%s only in result: %s\n%s only in expected: %s' % (
_s(type_, in_result),
in_result,
_s(type_, in_expected),
in_expected,
)
raise AssertionError(
'%s%ss do not match\n%s' % (
_fmt_msg(msg),
type_,
_fmt_path(path),
),
)
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
viewkeys(result),
viewkeys(expected),
msg,
path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),),
'key',
)
failures = []
for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
try:
assert_equal(
resultv,
expectedv,
path=path + ('[%r]' % (k,),),
msg=msg,
**kwargs
)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError('\n'.join(failures))
@assert_equal.register(list, list)
@assert_equal.register(tuple, tuple)
def assert_sequence_equal(result, expected, path=(), msg='', **kwargs):
result_len = len(result)
expected_len = len(expected)
assert result_len == expected_len, (
'%s%s lengths do not match: %d != %d\n%s' % (
_fmt_msg(msg),
type(result).__name__,
result_len,
expected_len,
_fmt_path(path),
)
)
for n, (resultv, expectedv) in enumerate(zip(result, expected)):
assert_equal(
resultv,
expectedv,
path=path + ('[%d]' % n,),
msg=msg,
**kwargs
)
@assert_equal.register(set, set)
def assert_set_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
result,
expected,
msg,
path,
'element',
)
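# Editor's illustrative sketch (not part of the original zipline source): the
# dispatched ``assert_equal`` recurses through the container types registered
# above and reports the path to the first mismatch. The values are arbitrary.
def _example_assert_equal_nested():  # pragma: no cover
    assert_equal({'a': [1, 2.0]}, {'a': [1, 2.0]})  # passes silently
    try:
        assert_equal({'a': [1, 2.0]}, {'a': [1, 3.0]})
    except AssertionError as e:
        # the message ends with something like "path: _['a'][1]"
        return str(e)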
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(result,
expected,
path=(),
msg='',
array_verbose=True,
array_decimal=None,
**kwargs):
f = (
np.testing.assert_array_equal
if array_decimal is None else
partial(np.testing.assert_array_almost_equal, decimal=array_decimal)
)
try:
f(
result,
expected,
verbose=array_verbose,
err_msg=msg,
)
except AssertionError as e:
raise AssertionError('\n'.join((str(e), _fmt_path(path))))
@assert_equal.register(LabelArray, LabelArray)
def assert_labelarray_equal(result, expected, path=(), **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
**kwargs
)
assert_equal(
result.as_int_array(),
expected.as_int_array(),
path=path + ('.as_int_array()',),
**kwargs
)
def _register_assert_equal_wrapper(type_, assert_eq):
"""Register a new check for an ndframe object.
Parameters
----------
type_ : type
The class to register an ``assert_equal`` dispatch for.
assert_eq : callable[type_, type_]
        The function which checks whether the two ndframes are equal.
Returns
-------
assert_ndframe_equal : callable[type_, type_]
The wrapped function registered with ``assert_equal``.
"""
@assert_equal.register(type_, type_)
def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
try:
assert_eq(
result,
expected,
**filter_kwargs(assert_eq, kwargs)
)
except AssertionError as e:
raise AssertionError(
_fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
)
return assert_ndframe_equal
assert_frame_equal = _register_assert_equal_wrapper(
pd.DataFrame,
assert_frame_equal,
)
assert_panel_equal = _register_assert_equal_wrapper(
pd.Panel,
assert_panel_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
pd.Series,
assert_series_equal,
)
assert_index_equal = _register_assert_equal_wrapper(
pd.Index,
assert_index_equal,
)
@assert_equal.register(pd.Categorical, pd.Categorical)
def assert_categorical_equal(result, expected, path=(), msg='', **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
msg=msg,
**kwargs
)
assert_equal(
result.codes,
expected.codes,
path=path + ('.codes',),
msg=msg,
**kwargs
)
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'):
assert_equal(
getattr(result, attr),
getattr(expected, attr),
path=path + ('.' + attr,),
**kwargs
)
@assert_equal.register(
(datetime.datetime, np.datetime64),
(datetime.datetime, np.datetime64),
)
def assert_timestamp_and_datetime_equal(result,
expected,
path=(),
msg='',
allow_datetime_coercions=False,
compare_nat_equal=True,
**kwargs):
"""
Branch for comparing python datetime (which includes pandas Timestamp) and
np.datetime64 as equal.
    Raises unless ``allow_datetime_coercions`` is passed as True.
"""
assert allow_datetime_coercions or type(result) == type(expected), (
"%sdatetime types (%s, %s) don't match and "
"allow_datetime_coercions was not set.\n%s" % (
_fmt_msg(msg),
type(result),
type(expected),
_fmt_path(path),
)
)
result = pd.Timestamp(result)
    expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
assert_equal.dispatch(object, object)(
result,
expected,
path=path,
**kwargs
)
@assert_equal.register(slice, slice)
def assert_slice_equal(result, expected, path=(), msg=''):
    diff_start = (
        ('starts are not equal: %s != %s' % (result.start, expected.start))
        if result.start != expected.start else
        ''
    )
    diff_stop = (
        ('stops are not equal: %s != %s' % (result.stop, expected.stop))
        if result.stop != expected.stop else
        ''
    )
    diff_step = (
        ('steps are not equal: %s != %s' % (result.step, expected.step))
        if result.step != expected.step else
        ''
    )
diffs = diff_start, diff_stop, diff_step
assert not any(diffs), '%s%s\n%s' % (
_fmt_msg(msg),
'\n'.join(filter(None, diffs)),
_fmt_path(path),
)
def assert_isidentical(result, expected, msg=''):
assert result.isidentical(expected), (
'%s%s is not identical to %s' % (_fmt_msg(msg), result, expected)
)
try:
# pull the dshape cases in
from datashape.util.testing import assert_dshape_equal
except ImportError:
pass
else:
assert_equal.funcs.update(
dissoc(assert_dshape_equal.funcs, (object, object)),
)
| apache-2.0 |
pnedunuri/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
harveywwu/vnpy | vnpy/trader/gateway/tkproGateway/DataApi/utils.py | 4 | 3883 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from collections import namedtuple
import datetime as dt
import pandas as pd
import numpy as np
long_nan = 9223372036854775807
def is_long_nan(v):
if v == long_nan:
return True
else:
return False
def to_nan(x):
if is_long_nan(x):
return np.nan
else:
return x
def _to_date(row):
date = int(row['DATE'])
return pd.datetime(year=date // 10000, month=date // 100 % 100, day=date % 100)
def _to_datetime(row):
date = int(row['DATE'])
time = int(row['TIME']) // 1000
    return pd.datetime(year=date // 10000, month=date // 100 % 100, day=date % 100,
                       hour=time // 10000, minute=time // 100 % 100, second=time % 100)
def _to_dataframe(cloumset, index_func=None, index_column=None):
df = pd.DataFrame(cloumset)
for col in df.columns:
if df.dtypes.loc[col] == np.int64:
df.loc[:, col] = df.loc[:, col].apply(to_nan)
if index_func:
df.index = df.apply(index_func, axis=1)
elif index_column:
df.index = df[index_column]
del df.index.name
return df
def _error_to_str(error):
if error:
if 'message' in error:
return str(error['error']) + "," + error['message']
else:
return str(error['error']) + ","
else:
return ","
def to_obj(class_name, data):
try:
if type(data) == list or type(data) == tuple:
result = []
for d in data:
result.append(namedtuple(class_name, list(d.keys()))(*list(d.values())))
return result
elif type(data) == dict:
result = namedtuple(class_name, list(data.keys()))(*list(data.values()))
return result
else:
return data
except Exception as e:
print(class_name, data, e)
return data
def to_date_int(date):
if isinstance(date, str):
t = dt.datetime.strptime(date, "%Y-%m-%d")
date_int = t.year * 10000 + t.month * 100 + t.day
return date_int
elif isinstance(date, (int, np.integer)):
return date
else:
return -1
def to_time_int(time):
if isinstance(time, str):
t = dt.datetime.strptime(time, "%H:%M:%S")
time_int = t.hour * 10000 + t.minute * 100 + t.second
return time_int
elif isinstance(time, (int, np.integer)):
return time
else:
return -1
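# Editor's illustrative sketch (not part of the original module): the two
# helpers above turn "YYYY-MM-DD" / "HH:MM:SS" strings into the integer forms
# used elsewhere in this API, and pass integers through unchanged.
def _example_date_time_int():
    assert to_date_int("2017-03-21") == 20170321
    assert to_date_int(20170321) == 20170321
    assert to_time_int("09:30:00") == 93000
    assert to_time_int(93000) == 93000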
def extract_result(cr, data_format="", index_column=None, class_name=""):
"""
format supports pandas, obj.
"""
err = _error_to_str(cr['error']) if 'error' in cr else None
if 'result' in cr:
if data_format == "pandas":
if index_column:
return (_to_dataframe(cr['result'], None, index_column), err)
# if 'TIME' in cr['result']:
# return (_to_dataframe(cr['result'], _to_datetime), err)
# elif 'DATE' in cr['result']:
# return (_to_dataframe(cr['result'], _to_date), err)
else:
return (_to_dataframe(cr['result']), err)
elif data_format == "obj" and cr['result'] and class_name:
r = cr['result']
if type(r) == list or type(r) == tuple:
result = []
for d in r:
result.append(namedtuple(class_name, list(d.keys()))(*list(d.values())))
elif type(r) == dict:
result = namedtuple(class_name, list(r.keys()))(*list(r.values()))
else:
result = r
return (result, err)
else:
return (cr['result'], err)
else:
return (None, err)
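# Editor's illustrative sketch (not part of the original module): unpack a
# hand-built response dict with ``extract_result``; the field names below are
# hypothetical, not part of the real API.
def _example_extract_result():
    cr = {'error': None, 'result': {'symbol': '000001', 'price': 10.5}}
    quote, err = extract_result(cr, data_format="obj", class_name="Quote")
    return quote.symbol, quote.price, err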
| mit |
nlpaueb/aueb.twitter.sentiment | regularization.py | 1 | 1421 | from sklearn import preprocessing
import numpy as np
import math
# regularize features to [-1, 1]: xi = (xi - mean) / (3 * variance)
def regularize(features):
#regularize per column
for i in range(0,len(features[0])):
try:
            # take every column
feat=features[:,i]
#mean and variance of every column
mean=np.mean(feat)
var=np.var(feat)
if(var!=0):
features[:,i]=(features[:,i]-mean)/float(3*var)
else :
features[:,i]=0
except:
pass
features[features>1]=1
features[features<-1]=-1
return features
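# Editor's illustrative sketch (not part of the original module): after
# regularize(), every column of the (copied) feature matrix lies in [-1, 1].
# The numbers are arbitrary.
def _example_regularize():
    feats = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
    out = regularize(feats.copy())
    assert out.min() >= -1.0 and out.max() <= 1.0
    return out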
# regularize features to [-1, 1] horizontally: yi = yi / norm(yi, 2)
def regularizeHorizontally(features):
for i in range(0,features.shape[0]):
if (features[i] == np.zeros(features[i].shape)).all() == True:
pass
else:
features[i] = features[i]/np.linalg.norm(features[i],ord=2)
features[features>1]=1
features[features<-1]=-1
return features
# xi = (xi - xmin) / (xmax - xmin)
def regularizeMaxMin(features):
#regularize per column
for i in range(0,len(features[0])):
        # take every column
feat=features[:,i]
#max and min value of every feature
xmax=max(feat)
xmin=min(feat)
if((xmax-xmin)!=0):
features[:,i]=(features[:,i]-xmin)/float(xmax-xmin)
else :
features[:,i]=0
return features
| gpl-3.0 |
pseudocubic/neutronpy | neutronpy/data/plot.py | 1 | 14569 | import numpy as np
from ..lsfit import Fitter
from ..lsfit.tools import convert_params
class PlotData(object):
"""Class containing data plotting methods
Methods
-------
plot
plot_line
plot_contour
plot_volume
"""
def plot(self, x=None, y=None, z=None, w=None, show_err=True, to_bin=None,
plot_options=None, fit_options=None, smooth_options=None,
output_file='', show_plot=True, **kwargs):
r"""Plots the data in the class. x and y must at least be specified,
and z and/or w being specified will produce higher dimensional plots
(contour and volume, respectively).
Parameters
----------
x : str, optional
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str, optional
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
z : str, optional
`data_column` key defining the z-axis.
Default: None
w : str, optional
`data_column` key defining the w-axis.
Default: None
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
Plot options to be passed to the the matplotlib plotting routine.
Default: None
fit_options : dict, optional
Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
if to_bin is None:
to_bin = dict()
if plot_options is None:
plot_options = dict()
if fit_options is None:
fit_options = dict()
if smooth_options is None:
smooth_options = dict(sigma=0)
if x is None:
try:
x = self.plot_default_x
except AttributeError:
raise
if y is None:
try:
y = self.plot_default_y
except AttributeError:
raise
if w is not None:
self.plot_volume(x, y, z, w, to_bin, plot_options, smooth_options,
output_file, show_plot, **kwargs)
elif w is None and z is not None:
self.plot_contour(x, y, z, to_bin, plot_options, smooth_options,
output_file, show_plot, **kwargs)
elif w is None and z is None:
self.plot_line(x, y, show_err, to_bin, plot_options, fit_options,
smooth_options, output_file, show_plot, **kwargs)
def plot_volume(self, x, y, z, w, to_bin=None, plot_options=None, smooth_options=None, output_file='', show_plot=True, **kwargs):
r"""Plots a 3D volume of 4D data
Parameters
----------
x : str
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
z : str
`data_column` key defining the z-axis.
Default: None
w : str
`data_column` key defining the w-axis.
Default: None
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
Plot options to be passed to the the matplotlib plotting routine.
Default: None
fit_options : dict, optional
Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
try:
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
raise ImportError('Matplotlib >= 1.3.0 is necessary for plotting.')
if to_bin:
data_bin = self.bin(to_bin)
_x = data_bin.data[x]
_y = data_bin.data[y]
_z = data_bin.data[z]
if w == 'intensity':
_w = data_bin.intensity
else:
_w = data_bin.data[w]
else:
_x = self.data[x]
_y = self.data[y]
_z = self.data[z]
if w == 'intensity':
_w = self.intensity
else:
_w = self.data[w]
if smooth_options['sigma'] > 0:
from scipy.ndimage.filters import gaussian_filter
_w = gaussian_filter(_w, **smooth_options)
_x, _y, _z, _w = (np.ma.masked_where(_w <= 0, _x),
np.ma.masked_where(_w <= 0, _y),
np.ma.masked_where(_w <= 0, _z),
np.ma.masked_where(_w <= 0, _w))
fig = plt.figure()
axis = fig.add_subplot(111, projection='3d')
axis.scatter(_x, _y, _z, c=_w, linewidths=0, vmin=1.e-4,
vmax=0.1, norm=colors.LogNorm())
if output_file:
plt.savefig(output_file)
elif show_plot:
plt.show()
else:
pass
def plot_contour(self, x, y, z, to_bin=None, plot_options=None, smooth_options=None, output_file='', show_plot=True, **kwargs):
r"""Method for plotting a 2D contour plot of 3D data
Parameters
----------
x : str
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
z : str
`data_column` key defining the z-axis.
Default: None
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
Plot options to be passed to the the matplotlib plotting routine.
Default: None
fit_options : dict, optional
Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise
if to_bin:
data_bin = self.bin(to_bin)
_x = data_bin.data[x]
_y = data_bin.data[y]
if z == 'intensity':
_z = data_bin.intensity
else:
_z = data_bin.data[z]
else:
_x = self.data[x]
_y = self.data[y]
if z == 'intensity':
_z = self.intensity
else:
_z = self.data[z]
if smooth_options['sigma'] > 0:
from scipy.ndimage.filters import gaussian_filter
_z = gaussian_filter(_z, **smooth_options)
x_step = np.around(
np.abs(np.unique(_x) - np.roll(np.unique(_x), 1))[1], decimals=4)
y_step = np.around(
np.abs(np.unique(_y) - np.roll(np.unique(_y), 1))[1], decimals=4)
x_sparse = np.linspace(
_x.min(), _x.max(), (_x.max() - _x.min()) / x_step + 1)
y_sparse = np.linspace(
_y.min(), _y.max(), (_y.max() - _y.min()) / y_step + 1)
X, Y = np.meshgrid(x_sparse, y_sparse)
from scipy.interpolate import griddata
Z = griddata((_x, _y), _z, (X, Y))
plt.pcolormesh(X, Y, Z, **plot_options)
if output_file:
plt.savefig(output_file)
elif show_plot:
plt.show()
else:
pass
def plot_line(self, x, y, show_err=True, to_bin=None, plot_options=None, fit_options=None, smooth_options=None, output_file='', show_plot=True, **kwargs):
r"""Method to Plot a line of 2D data
Parameters
----------
x : str
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
Plot options to be passed to the the matplotlib plotting routine.
Default: None
fit_options : dict, optional
Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise
if to_bin:
data_bin = self.bin(to_bin)
_x = data_bin.data[x]
if y == 'intensity':
_y = data_bin.intensity
_err = data_bin.error
else:
_y = data_bin.data[y]
_err = np.sqrt(data_bin.data[y])
else:
_x = self.data[x]
if y == 'intensity':
_y = self.intensity
_err = self.error
else:
_y = self.data[y]
_err = np.sqrt(self.data[y])
if smooth_options['sigma'] > 0:
from scipy.ndimage.filters import gaussian_filter
_y = gaussian_filter(_y, **smooth_options)
if not plot_options:
plot_options['fmt'] = 'rs'
if show_err:
plt.errorbar(_x, _y, yerr=_err, **plot_options)
else:
plt.errorbar(_x, _y, **plot_options)
# add axis labels
plt.xlabel(x)
plt.ylabel(y)
if fit_options:
def residuals(params, data):
funct, x, y, err = data
return (y - funct(params, x)) / err
fitobj = Fitter(residuals, data=(
fit_options['function'], _x, _y, _err))
if 'fixp' in fit_options:
fitobj.parinfo = [{'fixed': fix}
for fix in fit_options['fixp']]
try:
fitobj.fit(params0=fit_options['p'])
fit_x = np.linspace(min(_x), max(_x), len(_x) * 10)
fit_y = fit_options['function'](fitobj.params, fit_x)
plt.plot(fit_x, fit_y, '{0}-'.format(plot_options['fmt'][0]))
param_string = u'\n'.join(['p$_{{{0:d}}}$: {1:.3f}'.format(i, p)
for i, p in enumerate(fitobj.params)])
chi2_params = u'$\chi^2$: {0:.3f}\n\n'.format(
fitobj.chi2_min) + param_string
plt.annotate(chi2_params, xy=(0.05, 0.95), xycoords='axes fraction',
horizontalalignment='left', verticalalignment='top',
bbox=dict(alpha=0.75, facecolor='white', edgecolor='none'))
except Exception as mes: # pylint: disable=broad-except
raise Exception("Something wrong with fit: {0}".format(mes))
if output_file:
plt.savefig(output_file)
elif show_plot:
plt.show()
else:
pass
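# Editor's illustrative sketch (not part of neutronpy): assuming ``data`` is a
# neutronpy Data object mixing in PlotData, a fitted line cut might be drawn as
#
#     data.plot(x='e', y='intensity', show_err=True,
#               fit_options=dict(function=my_model, p=[1.0, 0.0, 0.1]))
#
# where ``my_model(params, x)`` and the column name 'e' are hypothetical and
# depend on the data file that was loaded.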
| mit |
mehdidc/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 5 | 11439 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
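# Editor's illustrative sketch (not part of scikit-learn): restrict a fitted
# DictVectorizer to a subset of its features with a boolean support mask.
def _example_restrict():  # pragma: no cover
    v = DictVectorizer(sparse=False)
    v.fit_transform([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
    v.restrict(np.array([True, False, True]))  # keep 'bar' and 'foo'
    return v.get_feature_names()               # ['bar', 'foo']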
| bsd-3-clause |
steverobbins/mycli | mycli/packages/tabulate.py | 28 | 38075 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from decimal import Decimal
from platform import python_version_tuple
from wcwidth import wcswidth
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.4"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is _int_type or type(string) is _long_type or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
if isinstance(string, (bool, Decimal,)):
return _text_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return ' ' * lwidth + s
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return s + ' ' * rwidth
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
lwidth = xwidth // 2
rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2
return ' ' * lwidth + s + ' ' * rwidth
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return wcswidth(_strip_invisible(s))
else:
return wcswidth(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
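# Illustrative sketch (not part of the original module): with a DataRow-style tuple
# such as rowfmt = ("|", "|", "|"), _build_simple_row([" spam ", " 41 "], rowfmt)
# returns "| spam | 41 |" (the right-hand end of the row is stripped).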
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1f:s:",
["help", "header", "format", "separator"])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
tablefmt = "simple"
sep = r"\s+"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep)
else:
with open(f) as fobj:
                _pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep)
def _pprint_file(fobject, headers, tablefmt, sep):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows]
print(tabulate(table, headers, tablefmt))
if __name__ == "__main__":
_main()
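# Illustrative CLI sketch (assumption: the module is saved as tabulate.py; it mirrors the
# usage string in _main above):
#   printf 'name value\nspam 41.9999\neggs 451\n' | python tabulate.py -1 -f grid
# reads whitespace-separated rows from stdin, uses the first row as the header and
# prints the table in the "grid" format.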
| bsd-3-clause |
microsoft/LightGBM | tests/distributed/_test_distributed.py | 1 | 7541 | import copy
import io
import socket
import subprocess
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, Generator, List
import numpy as np
import pytest
from sklearn.datasets import make_blobs, make_regression
from sklearn.metrics import accuracy_score
TESTS_DIR = Path(__file__).absolute().parent
@pytest.fixture(scope='module')
def executable(pytestconfig) -> str:
"""Returns the path to the lightgbm executable."""
return pytestconfig.getoption('execfile')
def _find_random_open_port() -> int:
"""Find a random open port on localhost."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('', 0))
port = s.getsockname()[1]
return port
def _generate_n_ports(n: int) -> Generator[int, None, None]:
return (_find_random_open_port() for _ in range(n))
def _write_dict(d: Dict, file: io.TextIOWrapper) -> None:
for k, v in d.items():
file.write(f'{k} = {v}\n')
def create_data(task: str, n_samples: int = 1_000) -> np.ndarray:
"""Create the appropriate data for the task.
The data is returned as a numpy array with the label as the first column.
"""
if task == 'binary-classification':
centers = [[-4, -4], [4, 4]]
X, y = make_blobs(n_samples, centers=centers, random_state=42)
elif task == 'regression':
X, y = make_regression(n_samples, n_features=4, n_informative=2, random_state=42)
dataset = np.hstack([y.reshape(-1, 1), X])
return dataset
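# Illustrative sketch (not part of the original tests): the label sits in column 0 and
# the features follow, so a caller could split the result as
#   data = create_data('regression', n_samples=4)  # shape (4, 5): 1 label + 4 features
#   y, X = data[:, 0], data[:, 1:]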
class DistributedMockup:
"""Simulate distributed training."""
default_train_config = {
'task': 'train',
'pre_partition': True,
'machine_list_file': TESTS_DIR / 'mlist.txt',
'tree_learner': 'data',
'force_row_wise': True,
'verbose': 0,
'num_boost_round': 20,
'num_leaves': 15,
'num_threads': 2,
}
default_predict_config = {
'task': 'predict',
'data': TESTS_DIR / 'train.txt',
'input_model': TESTS_DIR / 'model0.txt',
'output_result': TESTS_DIR / 'predictions.txt',
}
def __init__(self, executable: str):
self.executable = executable
def worker_train(self, i: int) -> subprocess.CompletedProcess:
"""Start the training process on the `i`-th worker."""
config_path = TESTS_DIR / f'train{i}.conf'
cmd = [self.executable, f'config={config_path}']
return subprocess.run(cmd)
def _set_ports(self) -> None:
"""Randomly assign a port for training to each worker and save all ports to mlist.txt."""
ports = set(_generate_n_ports(self.n_workers))
i = 0
max_tries = 100
while i < max_tries and len(ports) < self.n_workers:
n_ports_left = self.n_workers - len(ports)
candidates = _generate_n_ports(n_ports_left)
ports.update(candidates)
i += 1
if i == max_tries:
raise RuntimeError('Unable to find non-colliding ports.')
self.listen_ports = list(ports)
with open(TESTS_DIR / 'mlist.txt', 'wt') as f:
for port in self.listen_ports:
f.write(f'127.0.0.1 {port}\n')
def _write_data(self, partitions: List[np.ndarray]) -> None:
"""Write all training data as train.txt and each training partition as train{i}.txt."""
all_data = np.vstack(partitions)
np.savetxt(str(TESTS_DIR / 'train.txt'), all_data, delimiter=',')
for i, partition in enumerate(partitions):
np.savetxt(str(TESTS_DIR / f'train{i}.txt'), partition, delimiter=',')
def fit(self, partitions: List[np.ndarray], train_config: Dict = {}) -> None:
"""Run the distributed training process on a single machine.
For each worker i:
1. The i-th partition is saved as train{i}.txt.
2. A random port is assigned for training.
3. A configuration file train{i}.conf is created.
4. The lightgbm binary is called with config=train{i}.conf in another thread.
5. The trained model is saved as model{i}.txt. Each model file only differs in data and local_listen_port.
The whole training set is saved as train.txt.
"""
self.train_config = copy.deepcopy(self.default_train_config)
self.train_config.update(train_config)
self.n_workers = self.train_config['num_machines']
self._set_ports()
self._write_data(partitions)
self.label_ = np.hstack([partition[:, 0] for partition in partitions])
futures = []
with ThreadPoolExecutor(max_workers=self.n_workers) as executor:
for i in range(self.n_workers):
self.write_train_config(i)
train_future = executor.submit(self.worker_train, i)
futures.append(train_future)
results = [f.result() for f in futures]
for result in results:
if result.returncode != 0:
raise RuntimeError('Error in training')
def predict(self, predict_config: Dict[str, Any] = {}) -> np.ndarray:
"""Compute the predictions using the model created in the fit step.
predict_config is used to predict the training set train.txt
The predictions are saved as predictions.txt and are then loaded to return them as a numpy array.
"""
self.predict_config = copy.deepcopy(self.default_predict_config)
self.predict_config.update(predict_config)
config_path = TESTS_DIR / 'predict.conf'
with open(config_path, 'wt') as file:
_write_dict(self.predict_config, file)
cmd = [self.executable, f'config={config_path}']
result = subprocess.run(cmd)
if result.returncode != 0:
raise RuntimeError('Error in prediction')
y_pred = np.loadtxt(str(TESTS_DIR / 'predictions.txt'))
return y_pred
def write_train_config(self, i: int) -> None:
"""Create a file train{i}.conf with the required configuration to train.
Each worker gets a different port and piece of the data, the rest are the
model parameters contained in `self.config`.
"""
with open(TESTS_DIR / f'train{i}.conf', 'wt') as file:
output_model = TESTS_DIR / f'model{i}.txt'
data = TESTS_DIR / f'train{i}.txt'
file.write(f'output_model = {output_model}\n')
file.write(f'local_listen_port = {self.listen_ports[i]}\n')
file.write(f'data = {data}\n')
_write_dict(self.train_config, file)
def test_classifier(executable):
"""Test the classification task."""
num_machines = 2
data = create_data(task='binary-classification')
partitions = np.array_split(data, num_machines)
train_params = {
'objective': 'binary',
'num_machines': num_machines,
}
clf = DistributedMockup(executable)
clf.fit(partitions, train_params)
y_probas = clf.predict()
y_pred = y_probas > 0.5
assert accuracy_score(clf.label_, y_pred) == 1.
def test_regressor(executable):
"""Test the regression task."""
num_machines = 2
data = create_data(task='regression')
partitions = np.array_split(data, num_machines)
train_params = {
'objective': 'regression',
'num_machines': num_machines,
}
reg = DistributedMockup(executable)
reg.fit(partitions, train_params)
y_pred = reg.predict()
np.testing.assert_allclose(y_pred, reg.label_, rtol=0.2, atol=50.)
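# Illustrative note (assumption about the test harness): these tests are intended to be
# invoked with the path to the LightGBM CLI binary, e.g.
#   pytest _test_distributed.py --execfile=/path/to/lightgbm
# so that the `executable` fixture above can hand the binary to DistributedMockup.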
| mit |
williamleif/histwords | vecanalysis/makelowdim.py | 2 | 2913 | import numpy as np
import time
import random
from sklearn.utils.extmath import randomized_svd
from multiprocessing import Queue, Process
from argparse import ArgumentParser
from representations.explicit import Explicit
from ioutils import load_year_words, mkdir, write_pickle, words_above_count
INPUT_FORMAT = '{year:d}.bin'
OUT_FORMAT = '{year:d}'
def worker(proc_num, queue, out_dir, in_dir, count_dir, words, dim, num_words, min_count=100):
while True:
if queue.empty():
break
year = queue.get()
print "Loading embeddings for year", year
time.sleep(random.random() * 120)
valid_words = set(words_above_count(count_dir, year, min_count))
print len(valid_words)
words = list(valid_words.intersection(words[year][:num_words]))
print len(words)
base_embed = Explicit.load((in_dir + INPUT_FORMAT).format(year=year), normalize=False)
base_embed = base_embed.get_subembed(words, restrict_context=True)
print "SVD for year", year
u, s, v = randomized_svd(base_embed.m, n_components=dim, n_iter=5)
print "Saving year", year
np.save((out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-u.npy", u)
np.save((out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-v.npy", v)
np.save((out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-s.npy", s)
write_pickle(base_embed.iw, (out_dir + OUT_FORMAT).format(year=year, dim=dim) + "-vocab.pkl")
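# Illustrative sketch (assumption, not part of this script): downstream analysis code
# typically rebuilds a dense embedding for a year from the saved SVD factors, e.g.
#   u = np.load(out_dir + "1990-u.npy"); s = np.load(out_dir + "1990-s.npy")
#   word_vectors = u * (s ** 0.5)  # rows align with the vocab in 1990-vocab.pkl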
if __name__ == '__main__':
parser = ArgumentParser("Run SVD on historical co-occurrence matrices")
parser.add_argument("in_dir", help="Directory with PPMI data")
parser.add_argument("count_dir", help="Directory with PPMI data")
parser.add_argument("word_file", help="File containing sorted list of words to potentially include")
parser.add_argument("--num-words", type=int, help="Number of words to include", default=1000000)
parser.add_argument("--dim", type=int, default=300)
parser.add_argument("--workers", type=int, default=50)
parser.add_argument("--start-year", type=int, default=1800)
parser.add_argument("--end-year", type=int, default=1990)
parser.add_argument("--year-inc", type=int, default=10)
parser.add_argument("--min-count", type=int, default=100)
args = parser.parse_args()
queue = Queue()
years = range(args.start_year, args.end_year + 1, args.year_inc)
years.reverse()
for year in years:
queue.put(year)
out_dir = args.in_dir + "/svd/" + str(args.dim) + "/" + str(args.num_words) + "/" + str(args.min_count) + "/"
mkdir(out_dir)
words = load_year_words(args.word_file, years)
procs = [Process(target=worker, args=[i, queue, out_dir, args.in_dir, args.count_dir, words, args.dim, args.num_words, args.min_count]) for i in range(args.workers)]
for p in procs:
p.start()
for p in procs:
p.join()
| apache-2.0 |
jackwluo/py-quantmod | dash_example_full.py | 1 | 8032 | # In[]:
# Import required libraries
import os
import pandas as pd
import dash
import dash_core_components as core
import dash_html_components as html
from dash.dependencies import Input, Output
from flask_caching import Cache
import quantmod as qm
# In[]:
# Create layout
app = dash.Dash("Quantmod Full Demo")
app.css.append_css({
'external_url': (
'https://rawgit.com/chriddyp/0247653a7c52feb4c48437e1c1837f75'
'/raw/a68333b876edaf62df2efa7bac0e9b3613258851/dash.css'
)
})
# Add caching
# cache = Cache(app.server, config={
# 'CACHE_TYPE': 'redis',
# 'CACHE_REDIS_URL': os.environ.get('REDIS_URL', '127.0.0.1:6379')
# })
# timeout = 60 * 60 # 1 hour
# Controls
sp500 = ['AAPL', 'ABT', 'ABBV', 'ACN', 'ACE', 'ADBE', 'ADT', 'AAP', 'AES',
'AET', 'AFL', 'AMG', 'A', 'GAS', 'ARE', 'APD', 'AKAM', 'AA', 'AGN',
'ALXN', 'ALLE', 'ADS', 'ALL', 'ALTR', 'MO', 'AMZN', 'AEE', 'AAL',
'AEP', 'AXP', 'AIG', 'AMT', 'AMP', 'ABC', 'AME', 'AMGN', 'APH', 'APC',
'ADI', 'AON', 'APA', 'AIV', 'AMAT', 'ADM', 'AIZ', 'T', 'ADSK', 'ADP',
'AN', 'AZO', 'AVGO', 'AVB', 'AVY', 'BHI', 'BLL', 'BAC', 'BK', 'BCR',
'BXLT', 'BAX', 'BBT', 'BDX', 'BBBY', 'BRK.B', 'BBY', 'BLX', 'HRB',
'BA', 'BWA', 'BXP', 'BSX', 'BMY', 'BRCM', 'BF.B', 'CHRW', 'CA',
'CVC', 'COG', 'CAM', 'CPB', 'COF', 'CAH', 'HSIC', 'KMX', 'CCL',
'CAT', 'CBG', 'CBS', 'CELG', 'CNP', 'CTL', 'CERN', 'CF', 'SCHW',
'CHK', 'CVX', 'CMG', 'CB', 'CI', 'XEC', 'CINF', 'CTAS', 'CSCO', 'C',
'CTXS', 'CLX', 'CME', 'CMS', 'COH', 'KO', 'CCE', 'CTSH', 'CL',
'CMCSA', 'CMA', 'CSC', 'CAG', 'COP', 'CNX', 'ED', 'STZ', 'GLW',
'COST', 'CCI', 'CSX', 'CMI', 'CVS', 'DHI', 'DHR', 'DRI', 'DVA',
'DE', 'DLPH', 'DAL', 'XRAY', 'DVN', 'DO', 'DTV', 'DFS', 'DISCA',
'DISCK', 'DG', 'DLTR', 'D', 'DOV', 'DOW', 'DPS', 'DTE', 'DD', 'DUK',
'DNB', 'ETFC', 'EMN', 'ETN', 'EBAY', 'ECL', 'EIX', 'EW', 'EA',
'EMC', 'EMR', 'ENDP', 'ESV', 'ETR', 'EOG', 'EQT', 'EFX', 'EQIX',
'EQR', 'ESS', 'EL', 'ES', 'EXC', 'EXPE', 'EXPD', 'ESRX', 'XOM',
'FFIV', 'FB', 'FAST', 'FDX', 'FIS', 'FITB', 'FSLR', 'FE', 'FISV',
'FLIR', 'FLS', 'FLR', 'FMC', 'FTI', 'F', 'FOSL', 'BEN', 'FCX',
'FTR', 'GME', 'GPS', 'GRMN', 'GD', 'GE', 'GGP', 'GIS', 'GM',
'GPC', 'GNW', 'GILD', 'GS', 'GT', 'GOOGL', 'GOOG', 'GWW', 'HAL',
'HBI', 'HOG', 'HAR', 'HRS', 'HIG', 'HAS', 'HCA', 'HCP', 'HCN',
'HP', 'HES', 'HPQ', 'HD', 'HON', 'HRL', 'HSP', 'HST', 'HCBK',
'HUM', 'HBAN', 'ITW', 'IR', 'INTC', 'ICE', 'IBM', 'IP', 'IPG',
'IFF', 'INTU', 'ISRG', 'IVZ', 'IRM', 'JEC', 'JBHT', 'JNJ',
'JCI', 'JOY', 'JPM', 'JNPR', 'KSU', 'K', 'KEY', 'GMCR', 'KMB',
'KIM', 'KMI', 'KLAC', 'KSS', 'KRFT', 'KR', 'LB', 'LLL', 'LH',
'LRCX', 'LM', 'LEG', 'LEN', 'LVLT', 'LUK', 'LLY', 'LNC', 'LLTC',
'LMT', 'L', 'LOW', 'LYB', 'MTB', 'MAC', 'M', 'MNK', 'MRO', 'MPC',
'MAR', 'MMC', 'MLM', 'MAS', 'MA', 'MAT', 'MKC', 'MCD', 'MCK',
'MJN', 'MMV', 'MDT', 'MRK', 'MET', 'KORS', 'MCHP', 'MU', 'MSFT',
'MHK', 'TAP', 'MDLZ', 'MON', 'MNST', 'MCO', 'MS', 'MOS', 'MSI',
'MUR', 'MYL', 'NDAQ', 'NOV', 'NAVI', 'NTAP', 'NFLX', 'NWL',
'NFX', 'NEM', 'NWSA', 'NEE', 'NLSN', 'NKE', 'NI', 'NE', 'NBL',
'JWN', 'NSC', 'NTRS', 'NOC', 'NRG', 'NUE', 'NVDA', 'ORLY',
'OXY', 'OMC', 'OKE', 'ORCL', 'OI', 'PCAR', 'PLL', 'PH', 'PDCO',
'PAYX', 'PNR', 'PBCT', 'POM', 'PEP', 'PKI', 'PRGO', 'PFE',
'PCG', 'PM', 'PSX', 'PNW', 'PXD', 'PBI', 'PCL', 'PNC', 'RL',
'PPG', 'PPL', 'PX', 'PCP', 'PCLN', 'PFG', 'PG', 'PGR', 'PLD',
'PRU', 'PEG', 'PSA', 'PHM', 'PVH', 'QRVO', 'PWR', 'QCOM',
'DGX', 'RRC', 'RTN', 'O', 'RHT', 'REGN', 'RF', 'RSG', 'RAI',
'RHI', 'ROK', 'COL', 'ROP', 'ROST', 'RLD', 'R', 'CRM', 'SNDK',
'SCG', 'SLB', 'SNI', 'STX', 'SEE', 'SRE', 'SHW', 'SPG', 'SWKS',
'SLG', 'SJM', 'SNA', 'SO', 'LUV', 'SWN', 'SE', 'STJ', 'SWK',
'SPLS', 'SBUX', 'HOT', 'STT', 'SRCL', 'SYK', 'STI', 'SYMC', 'SYY',
'TROW', 'TGT', 'TEL', 'TE', 'TGNA', 'THC', 'TDC', 'TSO', 'TXN',
'TXT', 'HSY', 'TRV', 'TMO', 'TIF', 'TWX', 'TWC', 'TJX', 'TMK',
'TSS', 'TSCO', 'RIG', 'TRIP', 'FOXA', 'TSN', 'TYC', 'UA',
'UNP', 'UNH', 'UPS', 'URI', 'UTX', 'UHS', 'UNM', 'URBN', 'VFC',
'VLO', 'VAR', 'VTR', 'VRSN', 'VZ', 'VRTX', 'VIAB', 'V', 'VNO',
'VMC', 'WMT', 'WBA', 'DIS', 'WM', 'WAT', 'ANTM', 'WFC', 'WDC',
'WU', 'WY', 'WHR', 'WFM', 'WMB', 'WEC', 'WYN', 'WYNN', 'XEL',
'XRX', 'XLNX', 'XL', 'XYL', 'YHOO', 'YUM', 'ZBH', 'ZION', 'ZTS']
etf = ['SPY', 'XLF', 'GDX', 'EEM', 'VXX', 'IWM', 'UVXY', 'UXO', 'GDXJ', 'QQQ']
tickers = sp500 + etf
tickers = [dict(label=str(ticker), value=str(ticker))
for ticker in tickers]
functions = dir(qm.ta)[9:-4]
functions = [dict(label=str(function[4:]), value=str(function))
for function in functions]
# Layout
app.layout = html.Div(
[
html.H2('Quantmod Charts'),
html.Div(
[
html.Span(
core.Dropdown(
id='dropdown',
options=tickers,
value='SPY',
),
style={
'width': '450px',
'display': 'inline-block',
'text-align': 'left'
},
),
html.Span(
core.Dropdown(
id='multi',
options=functions,
multi=True,
value=[],
),
style={
'width': '450px',
'display': 'inline-block',
'text-align': 'left'
},
),
]
),
html.Div(
[html.Label('Custom Arguments:'), core.Input(id='arglist')],
id='arg-controls',
style={'display': 'none'}
),
core.Graph(id='output')
],
style={
'width': '900',
'margin-left': 'auto',
'margin-right': 'auto',
'text-align': 'center',
'font-family': 'overpass',
'background-color': '#F3F3F3'
}
)
@app.callback(Output('arg-controls', 'style'), [Input('multi', 'value')])
def display_control(multi):
if not multi:
return {'display': 'none'}
else:
return {'display': 'inline-block'}
@app.callback(Output('output', 'figure'), [Input('dropdown', 'value'),
Input('multi', 'value'),
Input('arglist', 'value')])
# @cache.memoize(timeout=timeout)
def update_graph_from_dropdown(dropdown, multi, arglist):
# Get Quantmod Chart
ch = qm.get_symbol(dropdown, start='2016/01/01')
# Get functions
if arglist:
arglist = arglist.replace('(', '').replace(')', '').split(';')
arglist = [args.strip() for args in arglist]
for function, args in zip(multi, arglist):
if args:
args = args.split(',')
newargs = []
for arg in args:
try:
arg = int(arg)
except:
try:
arg = float(arg)
except:
pass
newargs.append(arg)
print(newargs)
getattr(qm, function)(ch, *newargs)
else:
getattr(qm, function)(ch)
else:
for function in multi:
getattr(qm, function)(ch)
# Return plot as figure
fig = ch.to_figure(width=900)
return fig
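# Illustrative note (assumes quantmod indicator helpers such as add_SMA/add_RSI exist on
# the top-level qm namespace): with multi=['add_SMA', 'add_RSI'] and arglist='(50); (14)',
# the parsing above calls qm.add_SMA(ch, 50) and qm.add_RSI(ch, 14); an empty argument
# group falls back to the indicator's default parameters.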
# In[]:
# Main
if __name__ == '__main__':
app.run_server(debug=True, threaded=True, port=4002)
| mit |
fyffyt/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
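# Illustrative sketch (not part of the original module): a typical caller inside an
# estimator's partial_fit looks like
#   if _check_partial_fit_first_call(self, classes):
#       # first call: allocate per-class state using the freshly set self.classes_
#       ...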
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
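# Illustrative sketch (not part of the original module): for a dense single-output target
# y = np.array([[0], [1], [1]]), class_distribution(y) returns
# ([array([0, 1])], [2], [array([1/3, 2/3])]).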
| bsd-3-clause |
oew1v07/scikit-image | doc/examples/plot_gabor.py | 11 | 4450 | """
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
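# Illustrative note (not part of the original example): match() is a 1-nearest-neighbour
# classifier in Gabor-feature space under squared error; e.g. once the reference features
# below are built, match(compute_feats(grass, kernels), ref_feats) returns 1, the row of
# the grass reference features.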
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 190deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
| bsd-3-clause |
jm-begon/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
PanDAWMS/panda-server | pandaserver/daemons/scripts/recover_lost_files_daemon.py | 1 | 4606 | import json
import glob
import time
import os.path
import datetime
import threading
import traceback
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.config import panda_config
from pandaserver.dataservice import RecoverLostFilesCore
# logger
_logger = PandaLogger().getLogger('recover_lost_files')
# main
def main(tbuf=None, **kwargs):
_logger.debug("===================== start =====================")
    # overall timeout value (minutes)
overallTimeout = 300
# prefix of the files
prefixEVP = 'recov.'
# file pattern of evp files
evpFilePatt = panda_config.cache_dir + '/' + prefixEVP + '*'
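    # each recov.* file holds a JSON dictionary of recovery options; the code below only
    # relies on it carrying 'jediTaskID' (used for logging) and passes the whole dictionary
    # through to RecoverLostFilesCore.main unchanged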
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=1)
# thread pool
class ThreadPool:
def __init__(self):
self.lock = threading.Lock()
self.list = []
def add(self, obj):
self.lock.acquire()
self.list.append(obj)
self.lock.release()
def remove(self, obj):
self.lock.acquire()
self.list.remove(obj)
self.lock.release()
def join(self):
self.lock.acquire()
thrlist = tuple(self.list)
self.lock.release()
for thr in thrlist:
thr.join()
    # thread to process recovery request files
class EvpThr(threading.Thread):
def __init__(self, lock, pool, tb_if, file_name, to_delete):
threading.Thread.__init__(self)
self.lock = lock
self.pool = pool
self.fileName = file_name
self.to_delete = to_delete
self.taskBuffer = tb_if
self.pool.add(self)
def run(self):
self.lock.acquire()
with open(self.fileName) as f:
ops = json.load(f)
tmpLog = LogWrapper(_logger, '< jediTaskID={} >'.format(ops['jediTaskID']))
tmpLog.info('start {}'.format(self.fileName))
s, o = RecoverLostFilesCore.main(self.taskBuffer, ops, tmpLog)
tmpLog.info('status={}. {}'.format(s, o))
if s is not None or self.to_delete:
tmpLog.debug('delete {}'.format(self.fileName))
try:
os.remove(self.fileName)
except Exception:
pass
self.pool.remove(self)
self.lock.release()
# get files
timeNow = datetime.datetime.utcnow()
timeInt = datetime.datetime.utcnow()
fileList = glob.glob(evpFilePatt)
fileList.sort()
# create thread pool and semaphore
adderLock = threading.Semaphore(1)
adderThreadPool = ThreadPool()
# add
while len(fileList) != 0:
        # overall time limit so that the main session does not run for too long
if (datetime.datetime.utcnow() - timeNow) > datetime.timedelta(minutes=overallTimeout):
_logger.debug("time over in main session")
break
# try to get Semaphore
adderLock.acquire()
# get fileList
if (datetime.datetime.utcnow() - timeInt) > datetime.timedelta(minutes=15):
timeInt = datetime.datetime.utcnow()
# get file
fileList = glob.glob(evpFilePatt)
fileList.sort()
# choose a file
fileName = fileList.pop(0)
# release lock
adderLock.release()
if not os.path.exists(fileName):
continue
try:
modTime = datetime.datetime(*(time.gmtime(os.path.getmtime(fileName))[:7]))
if (timeNow - modTime) > datetime.timedelta(hours=2):
# last chance
_logger.debug("Last attempt : %s" % fileName)
thr = EvpThr(adderLock, adderThreadPool, taskBuffer, fileName, False)
thr.start()
elif (timeInt - modTime) > datetime.timedelta(seconds=5):
# try
_logger.debug("Normal attempt : %s" % fileName)
thr = EvpThr(adderLock, adderThreadPool, taskBuffer, fileName, True)
thr.start()
else:
_logger.debug("Wait %s : %s" % ((timeInt - modTime), fileName))
except Exception as e:
_logger.error("{} {}".format(str(e), traceback.format_exc()))
# join all threads
adderThreadPool.join()
_logger.debug("===================== end =====================")
# run
if __name__ == '__main__':
main()
| apache-2.0 |
trungnt13/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
amolkahat/pandas | asv_bench/benchmarks/plotting.py | 3 | 1454 | import numpy as np
from pandas import DataFrame, Series, DatetimeIndex, date_range
try:
from pandas.plotting import andrews_curves
except ImportError:
from pandas.tools.plotting import andrews_curves
import matplotlib
matplotlib.use('Agg')
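# Note (airspeed velocity convention, not specific to this file): methods prefixed with
# ``time_`` are what asv times, and ``setup`` runs before each timed method.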
class Plotting(object):
def setup(self):
self.s = Series(np.random.randn(1000000))
self.df = DataFrame({'col': self.s})
def time_series_plot(self):
self.s.plot()
def time_frame_plot(self):
self.df.plot()
class TimeseriesPlotting(object):
def setup(self):
N = 2000
M = 5
idx = date_range('1/1/1975', periods=N)
self.df = DataFrame(np.random.randn(N, M), index=idx)
idx_irregular = DatetimeIndex(np.concatenate((idx.values[0:10],
idx.values[12:])))
self.df2 = DataFrame(np.random.randn(len(idx_irregular), M),
index=idx_irregular)
def time_plot_regular(self):
self.df.plot()
def time_plot_regular_compat(self):
self.df.plot(x_compat=True)
def time_plot_irregular(self):
self.df2.plot()
class Misc(object):
def setup(self):
N = 500
M = 10
self.df = DataFrame(np.random.randn(N, M))
self.df['Name'] = ["A"] * N
def time_plot_andrews_curves(self):
andrews_curves(self.df, "Name")
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
aabadie/scikit-learn | sklearn/tests/test_naive_bayes.py | 72 | 19944 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
# Partial fit a second time with an incoherent X
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
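# (BernoulliNB binarizes its input -- with the default binarize=0.0 a
# count of 100 is treated as mere presence -- whereas MultinomialNB
# uses the raw counts.)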
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
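# i.e. with alpha=1.0 each entry of feature_log_prob_ should equal
#   log((N_ct + 1) / (N_c + 2))
# where N_ct is the number of class-c samples containing feature t and
# N_c is the total number of class-c samples.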
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
| bsd-3-clause |
hduongtrong/hyperemble | hyperemble/data/param_dist.py | 1 | 4020 | from __future__ import print_function, absolute_import, division
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
n = 1000
p = 100
n_jobs = 1
seed = 1
classification_models = [
KNeighborsClassifier,
LogisticRegression,
GaussianNB,
RandomForestClassifier,
ExtraTreesClassifier,
GradientBoostingClassifier,
LinearSVC,
SVC,
LinearDiscriminantAnalysis,
QuadraticDiscriminantAnalysis
]
default_params = {
"KNeighborsClassifier": {
"n_neighbors": 5,
"weights": 'uniform',
"n_jobs": 1,
},
"LogisticRegression": {
"solver": "lbfgs",
"multi_class": "multinomial",
"penalty": "l2",
"C": 1.0,
},
"GaussianNB": {
},
"RandomForestClassifier": {
"n_estimators": 500,
"criterion": "gini",
"max_depth": 8,
"bootstrap": True,
"random_state": seed,
"verbose": 0,
"n_jobs": -1,
},
"ExtraTreesClassifier": {
"n_estimators": 500,
"criterion": "gini",
"max_depth": 8,
"bootstrap": True,
"random_state": seed,
"verbose": 0,
"n_jobs": -1,
},
"GradientBoostingClassifier": {
"loss": "deviance",
"learning_rate": 0.1,
"n_estimators": 100,
"subsample": 1.0,
"max_depth": 6,
"random_state": seed,
"max_features": 10,
"verbose": 0,
},
"LinearSVC": {
"penalty": "l2",
"loss": "hinge",
"C": 1.0,
"verbose": 0,
"random_state": seed,
"multi_class": "ovr",
},
"SVC": {
"C": 1.0,
"kernel": "rbf",
"gamma": .01,
"random_state": seed,
"verbose": 0,
},
"LinearDiscriminantAnalysis": {
"solver": "lsqr",
"shrinkage": "auto",
},
"QuadraticDiscriminantAnalysis": {
"reg_param": .1,
},
}
dist_params = {
"KNeighborsClassifier": {
"n_neighbors": np.arange(int(np.sqrt(n))),
"weights": ['uniform', 'distance'],
"n_jobs": -1,
"algorithm": ["auto", "ball_tree", "kd_tree", "brute"],
"leaf_size": np.arange(1, 30)
},
"LogisticRegression": {
"solver": "lbfgs",
"multi_class": "multinomial",
"penalty": "l2",
"C": 1.0,
},
"GaussianNB": {
},
"RandomForestClassifier": {
"n_estimators": 500,
"criterion": "gini",
"max_depth": 8,
"bootstrap": True,
"random_state": seed,
"verbose": 0,
"n_jobs": -1,
},
"ExtraTreesClassifier": {
"n_estimators": 500,
"criterion": "gini",
"max_depth": 8,
"bootstrap": True,
"random_state": seed,
"verbose": 0,
"n_jobs": -1,
},
"GradientBoostingClassifier": {
"loss": "deviance",
"learning_rate": 0.1,
"n_estimators": 100,
"subsample": 1.0,
"max_depth": 6,
"random_state": seed,
"max_features": 10,
"verbose": 0,
},
"LinearSVC": {
"penalty": "l2",
"loss": "hinge",
"C": 1.0,
"verbose": 0,
"random_state": seed,
"multi_class": "ovr",
},
"SVC": {
"C": 1.0,
"kernel": "rbf",
"gamma": .01,
"random_state": seed,
"verbose": 0,
},
"LinearDiscriminantAnalysis": {
"solver": "lsqr",
"shrinkage": "auto",
},
"QuadraticDiscriminantAnalysis": {
"reg_param": .1,
},
}
| bsd-2-clause |
cython-testbed/pandas | pandas/tests/frame/test_query_eval.py | 3 | 40886 | # -*- coding: utf-8 -*-
from __future__ import print_function
import operator
import pytest
from pandas.compat import (zip, range, lrange, StringIO)
from pandas import DataFrame, Series, Index, MultiIndex, date_range
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.tests.frame.common import TestData
PARSERS = 'python', 'pandas'
ENGINES = 'python', pytest.param('numexpr', marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != 'pandas':
pytest.skip("cannot evaluate with parser {0!r}".format(parser))
class TestCompat(object):
def setup_method(self, method):
self.df = DataFrame({'A': [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query('A>0')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query('A>0', engine=None)
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine=None)
assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query('A>0', engine='python')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='python')
assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query('A>0', engine='numexpr')
assert_frame_equal(result, self.expected1)
result = df.eval('A+1', engine='numexpr')
assert_series_equal(result, self.expected2, check_names=False)
else:
pytest.raises(ImportError,
lambda: df.query('A>0', engine='numexpr'))
pytest.raises(ImportError,
lambda: df.eval('A+1', engine='numexpr'))
class TestDataFrameEval(TestData):
def test_ops(self):
# tst ops and reversed ops in evaluation
# GH7198
# smaller hits python, larger hits numexpr
for n in [4, 4000]:
df = DataFrame(1, index=range(n), columns=list('abcd'))
df.iloc[0] = 2
m = df.mean()
for op_str, op, rop in [('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__')]:
base = (DataFrame(np.tile(m.values, n) # noqa
.reshape(n, -1),
columns=list('abcd')))
expected = eval("base{op}df".format(op=op_str))
# ops as strings
result = eval("m{op}df".format(op=op_str))
assert_frame_equal(result, expected)
# these are commutative
if op in ['+', '*']:
result = getattr(df, op)(m)
assert_frame_equal(result, expected)
# these are not
elif op in ['-', '/']:
result = getattr(df, rop)(m)
assert_frame_equal(result, expected)
# GH7192
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = (1 - np.isnan(df.iloc[0:25]))
result = (1 - np.isnan(df)).iloc[0:25]
assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'b']})
msg = "expr must be a string to be evaluated"
with tm.assert_raises_regex(ValueError, msg):
df.query(lambda x: x.B == "b")
with tm.assert_raises_regex(ValueError, msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({'A': [1, 2, 3]})
msg = "expr cannot be an empty string"
with tm.assert_raises_regex(ValueError, msg):
df.query('')
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(randn(10, 2), columns=list('ab'))
dict1 = {'a': 1}
dict2 = {'b': 2}
assert (df.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
assert (pd.eval('a + b', resolvers=[dict1, dict2]) ==
dict1['a'] + dict2['b'])
class TestDataFrameQueryWithMultiIndex(object):
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b], names=['color', 'food'])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values('color').values, index=index,
name='color')
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.random.choice(['eggs', 'ham'], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != 'red']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser,
engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(['red'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != 'eggs']
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser,
engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(['eggs'])]
assert_frame_equal(res1, exp)
assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(['red', 'green'], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, 'rating']
df = DataFrame(randn(10, 2), index=index)
res = df.query('rating == 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind == 1]
assert_frame_equal(res, exp)
res = df.query('rating != 1', parser=parser, engine=engine)
ind = Series(df.index.get_level_values('rating').values, index=index,
name='rating')
exp = df[ind != 1]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {'index': df.index,
'columns': col_series,
'spam': to_series(df.index, 'spam'),
'eggs': to_series(df.index, 'eggs'),
'C0': col_series}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_raise_on_panel_with_multiindex(self, parser, engine):
p = tm.makePanel(7)
p.items = tm.makeCustomIndex(len(p.items), nlevels=2)
with pytest.raises(NotImplementedError):
pd.eval('p + 1', parser=parser, engine=engine)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas(object):
@classmethod
def setup_class(cls):
cls.engine = 'numexpr'
cls.parser = 'pandas'
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('index < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
d = {}
d['dates1'] = date_range('1/1/2012', periods=n)
d['dates3'] = date_range('1/1/2014', periods=n)
df = DataFrame(d)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('dates1 < 20130101 < dates3', engine=engine,
parser=parser)
expec = df[(df.index.to_series() < '20130101') &
('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_non_date(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame({'dates': date_range('1/1/2012', periods=n),
'nondate': np.arange(n)})
result = df.query('dates == nondate', parser=parser, engine=engine)
assert len(result) == 0
result = df.query('dates != nondate', parser=parser, engine=engine)
assert_frame_equal(result, df)
for op in ['<', '>', '<=', '>=']:
with pytest.raises(TypeError):
df.query('dates %s nondate' % op, parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
df = DataFrame({"i": lrange(10), "+": lrange(3, 13),
"r": lrange(4, 14)})
with pytest.raises(SyntaxError):
df.query('i - +', engine=engine, parser=parser)
def test_query_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(20, 2), columns=list('ab'))
a, b = 1, 2 # noqa
res = df.query('a > b', engine=engine, parser=parser)
expected = df[df.a > df.b]
assert_frame_equal(res, expected)
res = df.query('@a > b', engine=engine, parser=parser)
expected = df[a > df.b]
assert_frame_equal(res, expected)
# no local variable c
with pytest.raises(UndefinedVariableError):
df.query('@a > b > @c', engine=engine, parser=parser)
# no column named 'c'
with pytest.raises(UndefinedVariableError):
df.query('@a > b > c', engine=engine, parser=parser)
def test_query_doesnt_pickup_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
# we don't pick up the local 'sin'
with pytest.raises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
def test_query_builtin(self):
from pandas.core.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
with tm.assert_raises_regex(NumExprClobberingError,
'Variables in expression.+'):
df.query('sin > 5', engine=engine, parser=parser)
def test_query(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])
assert_frame_equal(df.query('a < b', engine=engine, parser=parser),
df[df.a < df.b])
assert_frame_equal(df.query('a + b > b * c', engine=engine,
parser=parser),
df[df.a + df.b > df.b * df.c])
def test_query_index_with_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=Index(range(10), name='blob'),
columns=['a', 'b', 'c'])
res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)
expec = df[(df.index < 5) & (df.a < df.b)]
assert_frame_equal(res, expec)
res = df.query('blob < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
def test_query_index_without_name(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randint(10, size=(10, 3)),
index=range(10), columns=['a', 'b', 'c'])
# "index" should refer to the index
res = df.query('index < b', engine=engine, parser=parser)
expec = df[df.index < df.b]
assert_frame_equal(res, expec)
# test against a scalar
res = df.query('index < 5', engine=engine, parser=parser)
expec = df[df.index < 5]
assert_frame_equal(res, expec)
def test_nested_scope(self):
engine = self.engine
parser = self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
expected = df[(df > 0) & (df2 > 0)]
result = df.query('(@df > 0) & (@df2 > 0)', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,
parser=parser)
assert_frame_equal(result, expected)
result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',
engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
assert_frame_equal(result, expected)
result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)
expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
assert_frame_equal(result, expected)
def test_nested_raises_on_local_self_reference(self):
from pandas.core.computation.ops import UndefinedVariableError
df = DataFrame(np.random.randn(5, 3))
# can't reference ourself b/c we're a local so @ is necessary
with pytest.raises(UndefinedVariableError):
df.query('df > 0', engine=self.engine, parser=self.parser)
def test_local_syntax(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
df = DataFrame(randn(100, 10), columns=list('abcdefghij'))
b = 1
expect = df[df.a < b]
result = df.query('a < @b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
expect = df[df.a < df.b]
result = df.query('a < b', engine=engine, parser=parser)
assert_frame_equal(result, expect)
def test_chained_cmp_and_in(self):
skip_if_no_pandas_parser(self.parser)
engine, parser = self.engine, self.parser
cols = list('abc')
df = DataFrame(randn(100, len(cols)), columns=cols)
res = df.query('a < b < c and a not in b not in c', engine=engine,
parser=parser)
ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) # noqa
expec = df[ind]
assert_frame_equal(res, expec)
def test_local_variable_with_in(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
a = Series(np.random.randint(3, size=15), name='a')
b = Series(np.random.randint(10, size=15), name='b')
df = DataFrame({'a': a, 'b': b})
expected = df.loc[(df.b - 1).isin(a)]
result = df.query('b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
b = Series(np.random.randint(10, size=15), name='b')
expected = df.loc[(b - 1).isin(a)]
result = df.query('@b - 1 in a', engine=engine, parser=parser)
assert_frame_equal(expected, result)
def test_at_inside_string(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
c = 1 # noqa
df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})
result = df.query('a == "@c"', engine=engine, parser=parser)
expected = df[df.a == "@c"]
assert_frame_equal(result, expected)
def test_query_undefined_local(self):
from pandas.core.computation.ops import UndefinedVariableError
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.rand(10, 2), columns=list('ab'))
with tm.assert_raises_regex(UndefinedVariableError,
"local variable 'c' is not defined"):
df.query('a == @c', engine=engine, parser=parser)
def test_index_resolvers_come_after_columns_with_the_same_name(self):
n = 1 # noqa
a = np.r_[20:101:20]
df = DataFrame({'index': a, 'b': np.random.randn(a.size)})
df.index.name = 'index'
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df[df['index'] > 5]
assert_frame_equal(result, expected)
df = DataFrame({'index': a,
'b': np.random.randn(a.size)})
result = df.query('ilevel_0 > 5', engine=self.engine,
parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
df = DataFrame({'a': a, 'b': np.random.randn(a.size)})
df.index.name = 'a'
result = df.query('a > 5', engine=self.engine, parser=self.parser)
expected = df[df.a > 5]
assert_frame_equal(result, expected)
result = df.query('index > 5', engine=self.engine, parser=self.parser)
expected = df.loc[df.index[df.index > 5]]
assert_frame_equal(result, expected)
def test_inf(self):
n = 10
df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})
df.loc[::2, 0] = np.inf
ops = '==', '!='
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
q = 'a %s inf' % op
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
assert_frame_equal(result, expected)
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryNumExprPython, cls).setup_class()
cls.engine = 'numexpr'
cls.parser = 'python'
cls.frame = TestData().frame
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
df['dates2'] = date_range('1/1/2013', periods=5)
df['dates3'] = date_range('1/1/2014', periods=5)
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates2'] = date_range('1/1/2013', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT
res = df.query('(dates1 < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.iloc[0, 0] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
res = df.query('(index < 20130101) & (20130101 < dates3)',
engine=engine, parser=parser)
expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]
assert_frame_equal(res, expec)
def test_date_index_query_with_NaT_duplicates(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(randn(n, 3))
df['dates1'] = date_range('1/1/2012', periods=n)
df['dates3'] = date_range('1/1/2014', periods=n)
df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT
df.set_index('dates1', inplace=True, drop=True)
with pytest.raises(NotImplementedError):
df.query('index < 20130101 < dates3', engine=engine, parser=parser)
def test_nested_scope(self):
from pandas.core.computation.ops import UndefinedVariableError
engine = self.engine
parser = self.parser
# smoke test
x = 1 # noqa
result = pd.eval('x + 1', engine=engine, parser=parser)
assert result == 2
df = DataFrame(np.random.randn(5, 3))
df2 = DataFrame(np.random.randn(5, 3))
# don't have the pandas parser
with pytest.raises(SyntaxError):
df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)
with pytest.raises(UndefinedVariableError):
df.query('(df>0) & (df2>0)', engine=engine, parser=parser)
expected = df[(df > 0) & (df2 > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,
parser=parser)
assert_frame_equal(expected, result)
expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]
result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',
engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPandas, cls).setup_class()
cls.engine = 'python'
cls.parser = 'pandas'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):
@classmethod
def setup_class(cls):
super(TestDataFrameQueryPythonPython, cls).setup_class()
cls.engine = cls.parser = 'python'
cls.frame = TestData().frame
def test_query_builtin(self):
engine, parser = self.engine, self.parser
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
df.index.name = 'sin'
expected = df[df.index > 5]
result = df.query('sin > 5', engine=engine, parser=parser)
assert_frame_equal(expected, result)
class TestDataFrameQueryStrings(object):
def test_str_query_method(self, parser, engine):
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings == 'a']
if parser != 'pandas':
col = 'strings'
lst = '"a"'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
pytest.raises(NotImplementedError, df.query, ex,
engine=engine, parser=parser,
local_dict={'strings': df.strings})
else:
res = df.query('"a" == strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('strings == "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[df.strings.isin(['a'])])
expect = df[df.strings != 'a']
res = df.query('strings != "a"', engine=engine, parser=parser)
assert_frame_equal(res, expect)
res = df.query('"a" != strings', engine=engine, parser=parser)
assert_frame_equal(res, expect)
assert_frame_equal(res, df[~df.strings.isin(['a'])])
def test_str_list_query_method(self, parser, engine):
df = DataFrame(randn(10, 1), columns=['b'])
df['strings'] = Series(list('aabbccddee'))
expect = df[df.strings.isin(['a', 'b'])]
if parser != 'pandas':
col = 'strings'
lst = '["a", "b"]'
lhs = [col] * 2 + [lst] * 2
rhs = lhs[::-1]
eq, ne = '==', '!='
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
res = df.query('strings == ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] == strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
expect = df[~df.strings.isin(['a', 'b'])]
res = df.query('strings != ["a", "b"]', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
res = df.query('["a", "b"] != strings', engine=engine,
parser=parser)
assert_frame_equal(res, expect)
def test_query_with_string_columns(self, parser, engine):
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
if parser == 'pandas':
res = df.query('a in b', parser=parser, engine=engine)
expec = df[df.a.isin(df.b)]
assert_frame_equal(res, expec)
res = df.query('a in b and c < d', parser=parser, engine=engine)
expec = df[df.a.isin(df.b) & (df.c < df.d)]
assert_frame_equal(res, expec)
else:
with pytest.raises(NotImplementedError):
df.query('a in b', parser=parser, engine=engine)
with pytest.raises(NotImplementedError):
df.query('a in b and c < d', parser=parser, engine=engine)
def test_object_array_eq_ne(self, parser, engine):
df = DataFrame({'a': list('aaaabbbbcccc'),
'b': list('aabbccddeeff'),
'c': np.random.randint(5, size=12),
'd': np.random.randint(9, size=12)})
res = df.query('a == b', parser=parser, engine=engine)
exp = df[df.a == df.b]
assert_frame_equal(res, exp)
res = df.query('a != b', parser=parser, engine=engine)
exp = df[df.a != df.b]
assert_frame_equal(res, exp)
def test_query_with_nested_strings(self, parser, engine):
skip_if_no_pandas_parser(parser)
raw = """id event timestamp
1 "page 1 load" 1/1/2014 0:00:01
1 "page 1 exit" 1/1/2014 0:00:31
2 "page 2 load" 1/1/2014 0:01:01
2 "page 2 exit" 1/1/2014 0:01:31
3 "page 3 load" 1/1/2014 0:02:01
3 "page 3 exit" 1/1/2014 0:02:31
4 "page 1 load" 2/1/2014 1:00:01
4 "page 1 exit" 2/1/2014 1:00:31
5 "page 2 load" 2/1/2014 1:01:01
5 "page 2 exit" 2/1/2014 1:01:31
6 "page 3 load" 2/1/2014 1:02:01
6 "page 3 exit" 2/1/2014 1:02:31
"""
df = pd.read_csv(StringIO(raw), sep=r'\s{2,}', engine='python',
parse_dates=['timestamp'])
expected = df[df.event == '"page 1 load"']
res = df.query("""'"page 1 load"' in event""", parser=parser,
engine=engine)
assert_frame_equal(expected, res)
def test_query_with_nested_special_character(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = DataFrame({'a': ['a', 'b', 'test & test'],
'b': [1, 2, 3]})
res = df.query('a == "test & test"', parser=parser, engine=engine)
expec = df[df.a == 'test & test']
assert_frame_equal(res, expec)
def test_query_lex_compare_strings(self, parser, engine):
import operator as opr
a = Series(np.random.choice(list('abcde'), 20))
b = Series(np.arange(a.size))
df = DataFrame({'X': a, 'Y': b})
ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}
for op, func in ops.items():
res = df.query('X %s "d"' % op, engine=engine, parser=parser)
expected = df[func(df.X, 'd')]
assert_frame_equal(res, expected)
def test_query_single_element_booleans(self, parser, engine):
columns = 'bid', 'bidsize', 'ask', 'asksize'
data = np.random.randint(2, size=(1, len(columns))).astype(bool)
df = DataFrame(data, columns=columns)
res = df.query('bid & ask', engine=engine, parser=parser)
expected = df[df.bid & df.ask]
assert_frame_equal(res, expected)
def test_query_string_scalar_variable(self, parser, engine):
skip_if_no_pandas_parser(parser)
df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],
'Price': [109.70, 109.72, 183.30, 183.35]})
e = df[df.Symbol == 'BUD US']
symb = 'BUD US' # noqa
r = df.query('Symbol == @symb', parser=parser, engine=engine)
assert_frame_equal(e, r)
class TestDataFrameEvalWithFrame(object):
def setup_method(self, method):
self.frame = DataFrame(randn(10, 3), columns=list('abc'))
def teardown_method(self, method):
del self.frame
def test_simple_expr(self, parser, engine):
res = self.frame.eval('a + b', engine=engine, parser=parser)
expect = self.frame.a + self.frame.b
assert_series_equal(res, expect)
def test_bool_arith_expr(self, parser, engine):
res = self.frame.eval('a[a < 1] + b', engine=engine, parser=parser)
expect = self.frame.a[self.frame.a < 1] + self.frame.b
assert_series_equal(res, expect)
@pytest.mark.parametrize('op', ['+', '-', '*', '/'])
def test_invalid_type_for_operator_raises(self, parser, engine, op):
df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})
with tm.assert_raises_regex(TypeError,
r"unsupported operand type\(s\) "
"for .+: '.+' and '.+'"):
df.eval('a {0} b'.format(op), engine=engine, parser=parser)
| bsd-3-clause |
viekie/tensorflow-tutorial | chap03/auto_encode.py | 1 | 2728 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Power by viekie2017-08-04 08:03:26
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from tensorflow.examples.tutorials.mnist import input_data
from six.moves import xrange
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
LEARNING_RATE = 0.01
EPOCHES = 10
BATCH_SIZE = 256
DISPLAY_STEP = 1
EXAMPLES_TO_SHOW = 10
N_INPUT = 784
x = tf.placeholder(tf.float32, [None, N_INPUT])
N_HIDDEN_1 = 256
N_HIDDEN_2 = 128
weights = {
'encoder_w1': tf.Variable(tf.random_normal([N_INPUT, N_HIDDEN_1])),
'encoder_w2': tf.Variable(tf.random_normal([N_HIDDEN_1, N_HIDDEN_2])),
'decoder_w1': tf.Variable(tf.random_normal([N_HIDDEN_2, N_HIDDEN_1])),
'decoder_w2': tf.Variable(tf.random_normal([N_HIDDEN_1, N_INPUT]))
}
bias = {
'encoder_b1': tf.Variable(tf.random_normal([N_HIDDEN_1])),
'encoder_b2': tf.Variable(tf.random_normal([N_HIDDEN_2])),
'decoder_b1': tf.Variable(tf.random_normal([N_HIDDEN_1])),
'decoder_b2': tf.Variable(tf.random_normal([N_INPUT]))
}
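# layer sizes: 784 -> 256 -> 128 for the encoder, mirrored back to 784 in the decoder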
def encoder(x):
layer1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_w1']),
bias['encoder_b1']))
layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, weights['encoder_w2']),
bias['encoder_b2']))
return layer2
def decoder(x):
layer1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_w1']),
bias['decoder_b1']))
layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, weights['decoder_w2']),
bias['decoder_b2']))
return layer2
encoder_op = encoder(x)
decoder_op = decoder(encoder_op)
y_pred = decoder_op
y_true = x
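# reconstruction loss: mean squared error between the input and its reconstruction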
cost = tf.reduce_mean(tf.pow(y_pred-y_true, 2))
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
trainer = optimizer.minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
batchs = int(mnist.train.num_examples/BATCH_SIZE)
for epoch in xrange(EPOCHES):
for i in xrange(batchs):
xs, ys = mnist.train.next_batch(BATCH_SIZE)
_, c = sess.run([trainer, cost], feed_dict={x: xs})
if epoch % DISPLAY_STEP == 0:
print('epoch: %d, cost: %f' % (epoch+1, c))
encoder_decoder = \
sess.run(y_pred, feed_dict={x: mnist.test.images[:EXAMPLES_TO_SHOW]})
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(EXAMPLES_TO_SHOW):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encoder_decoder[i], (28, 28)))
plt.show()
| apache-2.0 |
gomesfelipe/BDA_py_demos | demos_ch5/demo5_1.py | 19 | 5055 | """Bayesian Data Analysis, 3rd ed
Chapter 5, demo 1
Hierarchical model for Rats experiment (BDA3, p. 102).
"""
from __future__ import division
import numpy as np
from scipy.stats import beta
from scipy.special import gammaln
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# rat data (BDA3, p. 102)
y = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2,
5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4,
10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15,
15, 9, 4
])
n = np.array([
20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20,
20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19,
46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20,
48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46,
47, 24, 14
])
M = len(y)
# plot the separate and pooled models
plt.figure(figsize=(8,10))
x = np.linspace(0, 1, 250)
# separate
plt.subplot(2, 1, 1)
lines = plt.plot(x, beta.pdf(x[:,None], y[:-1] + 1, n[:-1] - y[:-1] + 1),
linewidth=1)
# highlight the last line
line1, = plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r')
plt.legend((lines[0], line1),
(r'Posterior of $\theta_j$', r'Posterior of $\theta_{71}$'))
plt.yticks(())
plt.title('separate model')
# pooled
plt.subplot(2, 1, 2)
plt.plot(x, beta.pdf(x, y.sum() + 1, n.sum() - y.sum() + 1),
linewidth=2, label=(r'Posterior of common $\theta$'))
plt.legend()
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title('pooled model')
# compute the marginal posterior of alpha and beta in the hierarchical model in a grid
A = np.linspace(0.5, 6, 100)
B = np.linspace(3, 33, 100)
# calculated in logarithms for numerical accuracy
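# up to an additive constant,
#   log p(alpha, beta | y) = -5/2 * log(alpha + beta)
#       + sum_j [ log B(alpha + y_j, beta + n_j - y_j) - log B(alpha, beta) ]
# where B is the beta function; it is expanded below with gammaln, using
#   log B(a, b) = gammaln(a) + gammaln(b) - gammaln(a + b)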
lp = (
- 5/2 * np.log(A + B[:,None])
+ np.sum(
gammaln(A + B[:,None])
- gammaln(A)
- gammaln(B[:,None])
+ gammaln(A + y[:,None,None])
+ gammaln(B[:,None] + (n - y)[:,None,None])
- gammaln(A + B[:,None] + n[:,None,None]),
axis=0
)
)
# subtract the maximum value to avoid over/underflow in exponentiation
lp -= lp.max()
p = np.exp(lp)
# plot the marginal posterior
fig = plt.figure()
plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1]))
plt.xlabel(r'$\alpha$', fontsize=20)
plt.ylabel(r'$\beta$', fontsize=20)
plt.title('The marginal posterior of alpha and beta in hierarchical model')
# sample from the posterior grid of alpha and beta
nsamp = 1000
samp_indices = np.unravel_index(
np.random.choice(p.size, size=nsamp, p=p.ravel()/p.sum()),
p.shape
)
samp_A = A[samp_indices[1]]
samp_B = B[samp_indices[0]]
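# samp_indices[0] indexes the beta grid (rows of p) and samp_indices[1]
# the alpha grid (columns); np.random.choice draws flat grid cells with
# probability proportional to p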
# add random jitter, see BDA3 p. 76
samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0])
samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0])
# Plot samples from the distribution of distributions Beta(alpha,beta),
# that is, plot Beta(alpha,beta) using the posterior samples of alpha and beta
fig = plt.figure(figsize=(8,10))
plt.subplot(2, 1, 1)
plt.plot(x, beta.pdf(x[:,None], samp_A[:20], samp_B[:20]), linewidth=1)
plt.yticks(())
plt.title(r'Posterior samples from the distribution of distributions '
r'Beta($\alpha$,$\beta$)')
# The average of above distributions, is the predictive distribution for a new
# theta, and also the prior distribution for theta_j.
# Plot this.
plt.subplot(2, 1, 2)
plt.plot(x, np.mean(beta.pdf(x, samp_A[:,None], samp_B[:,None]), axis=0))
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title(r'Predictive distribution for a new $\theta$ '
r'and prior for $\theta_j$')
# And finally compare the separate model and hierarchical model
plt.figure(figsize=(8,10))
x = np.linspace(0, 1, 250)
# first plot the separate model (same as above)
plt.subplot(2, 1, 1)
# note that for clarity only every 7th distribution is plotted
plt.plot(x, beta.pdf(x[:,None], y[7:-1:7] + 1, n[7:-1:7] - y[7:-1:7] + 1),
linewidth=1)
# highlight the last line
plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r')
plt.yticks(())
plt.title('separate model')
# And the hierarchical model. Note that these marginal posteriors for theta_j are
# more narrow than in separate model case, due to borrowed information from
# the other theta_j's.
plt.subplot(2, 1, 2)
# note that for clarity only every 7th distribution is plotted
lines = plt.plot(
x,
np.mean(
beta.pdf(
x[:,None],
y[7::7] + samp_A[:,None,None],
n[7::7] - y[7::7] + samp_B[:,None,None]
),
axis=0
),
linewidth=1,
)
# highlight the last line
lines[-1].set_linewidth(2)
lines[-1].set_color('r')
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title('hierarchical model')
plt.show()
| gpl-3.0 |
oscarcbr/cellery | cellery/randomz.py | 1 | 115214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# randomz.py part of cellery (ceRNAs linking inference)
#
# Copyright 2016 Oscar Bedoya Reina <obedoya@igmm-linux-005>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
########################################################
#~ Import external libraries
########################################################
#----------------------------
#Import matplotlib libraries
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#----------------------------
#Import other libraries
from cellery import exceptions,statistics
from numpy import array,float32,divide,inf,ma,multiply,nan,random, \
vectorize,zeros
from random import seed
from scipy import integrate
from scipy.stats import mstats,binom,norm
import numpy as np
########################################################
#~ Compute randomization on only columns
########################################################
def cmptClmRndmztn(aBckgrndMetrcMskd,aBckgrndPosRows, \
aBckgrndClmnPosProb,aTrgtMetrcMskd=None,statstcTrgt=False, \
stdTrgt=False,lenTrgtRows=False,lenTrgtClms=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosRowsToGns=None,aPosClmnsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None,mirnDtype='cnt', \
outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. aBckgrndPosRows is the position of
the rows in the background. aBckgrndClmnPosProb is the probability
of all columns to be selected from the background. Optionally,
aTrgtMetrcMskd is a masked array with metric values of interest for
the target. statstcTrgt is the statistic value of the target whose
probability is going to be calculated from the randomized background
using a z-score approach. stdTrgt is the standard deviation of the
target data. lenTrgtRows is the number of rows to be sampled.
lenTrgtClms is the number of columns to be sampled. outPltFl is a
file to plot the randomization and significance of target statistic.
numRndmztns is the number of randomizations to run in the background.
maxRndmztns is the maximum number of iterations to enforce normality.
seedAdd is the seed to run the randomizations. statstc is the
statistic to sample from each randomization to build the normal
distribution to test the significance of the target statistic. If
vrbse all log messages are going to be printed. aPosClmnsToGns is an
array of position of gene to which each column in aBckgrndMetrcMskd
and aPosProbClmns is mapped. aPosRowsToGns is an array of position
of gene to which each row in aBckgrndMetrcMskd and aPosProbRows is
	mapped. If aPosRowsToGns and aPosClmnsToGns are not None, calculations
are going to be run by gene. aBckgrndMirnMetrcMskd is a masked array
(3D supermatrix) with an array of metric values for each column and
row to be summarized using the value statstc. aBckgrndMirnMetrcMskd
is an array with the miRNA metrics summarized for the background.
aTrgtMirnStatstcMskd is an array with the miRNA metrics for the
target dataset. mirnDtype is the datatype of the miRNA metric:
{'cnt' for counts and 'scr' for scores}. outMirnStstFl is the file
to save the results of the miRNA probability calculation. aMirnNms
is an array of miRNA names. fnctn is the statistical function to
calculate the probability {'sf' is greater or equal than}.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
	NOTE: Only columns are going to be randomized.
NOTE: Positions in aBckgrndPosRows are going to be obtained in the
same order for every randomization.
"""
#----------------------------
#Test for inputs
ovrAllogRun = []#holder for log message
fnctn='sf'#Greater or equal, as normal distributed 1-p is less than.
assert aTrgtMetrcMskd is not None or (statstcTrgt and stdTrgt and \
lenTrgtClms)
assert not lenTrgtRows#no length target for rows
#mirna inputs
if aBckgrndMirnMetrcMskd is not None or aTrgtMirnStatstcMskd is not \
None or outMirnStstFl is not None or aMirnNms is not None:
assert aBckgrndMirnMetrcMskd is not None and \
aTrgtMirnStatstcMskd is not None and outMirnStstFl is not None \
and aMirnNms is not None
assert mirnDtype in {'cnt','scr'}
try:
raise exceptions.CelleryWarningObjct \
('Randomizations are going to be run for','miRNAs')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
ovrAllogRun.append(mssge)
pass
#gene inputs
if aPosClmnsToGns is not None and aPosRowsToGns is not None:
mthdBckgrndRndmztn = rnClmRndmztnByGns
try:
raise exceptions.CelleryWarningObjct \
('Randomizations are going to be run at the level of','gene')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
ovrAllogRun.append(mssge)
pass
if aTrgtMetrcMskd is not None:
#calculate stat for the target metrics by gene
vctrzdRtrnAvrgMskdArray=vectorize(rtrnAvrgMskdArray, \
excluded={0})
aTrgtMetrcMskdGns = vctrzdRtrnAvrgMskdArray(aTrgtMetrcMskd, \
aPosRowsToGns,aPosClmnsToGns)
aTrgtMetrcMskdGns = ma.masked_invalid(aTrgtMetrcMskdGns)
statstcTrgt = getattr(ma,statstc)(aTrgtMetrcMskdGns)
stdTrgt = ma.std(aTrgtMetrcMskdGns)
lenTrgtClms = aTrgtMetrcMskd.shape[1]
else:
assert statstcTrgt and stdTrgt and lenTrgtClms
		if aTrgtMirnStatstcMskd is not None:
			#number of miRNAs, assuming a rows x columns x miRNAs supermatrix
			numMrns = aTrgtMirnStatstcMskd.shape[2]
			rtrnRndmBckgrndMirnMetrcPrGn = \
			vectorize(rtrnStstMskdMirnArray,excluded={0})
			aTrgtMirnStatstcMskd = \
			rtrnRndmBckgrndMirnMetrcPrGn(aTrgtMirnStatstcMskd, \
			aPosRowsToGns,aPosClmnsToGns,numMrns)
else:
mthdBckgrndRndmztn = rnClmRndmztn#only randomize columns
if statstcTrgt or stdTrgt or lenTrgtClms:
assert statstcTrgt and stdTrgt and lenTrgtClms
else:
statstcTrgt = getattr(ma,statstc)(ma.masked_invalid \
(aTrgtMetrcMskd))
stdTrgt = ma.std(ma.masked_invalid(aTrgtMetrcMskd))
lenTrgtClms = aTrgtMetrcMskd.shape[1]
#----------------------------
#Report variables to use
if seedAdd:
seedMssg = '\tSeed: %s'%seedAdd
else:
seedMssg = ''
messg= '\n'.join([
'-----------------------------------------------------------', \
'A randomization of columns is going to be conducted with parameters:', \
'\tTarget %s: %s, and standard error: %s'%(statstc,statstcTrgt, \
stdTrgt), \
'\t%s columns are going to be sampled in %s randomizations' \
%(lenTrgtClms,numRndmztns), \
"\tand significance of target's %s is going to be determined."% \
statstc, \
seedMssg, \
'-----------------------------------------------------------'])
ovrAllogRun.append(messg)
if vrbse:
print messg
#----------------------------
#Run randomizations
p_val,zscore,meanBckgrnd,stdBckgrnd,logRun = rndmzCore \
(aBckgrndMetrcMskd,mthdBckgrndRndmztn,statstcTrgt,stdTrgt,outPltFl, \
aBckgrndClmnPosProb,aBckgrndPosRows,lenTrgtClms,lenTrgtRows, \
seedAdd,numRndmztns,maxRndmztns,statstc,fnctn,vrbse,aPosClmnsToGns, \
aPosRowsToGns,aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype, \
outMirnStstFl,aMirnNms)
#----------------------------
#Set log out
	if p_val<=0.01:
		signfcn = 'in the top 1% of the values in the randomized'
	elif p_val<=0.05:
		signfcn = 'in the top 5% of the values in the randomized'
	elif p_val>=0.99:
		signfcn = 'in the bottom 1% of the values in the randomized'
		p_val = 1-p_val#p-val in the bottom
	elif p_val>=0.95:
		signfcn = 'in the bottom 5% of the values in the randomized'
		p_val = 1-p_val#p-val in the bottom
	else:
		signfcn = 'to be comparable with the values in the randomized'
ovrAllogRun.append(logRun)
messg = '\n'.join([
'-----------------------------------------------------------', \
'\t%s randomizations finished!'%numRndmztns, \
"\tTarget's %s was found %s background"%(statstc,signfcn), \
"\twith z-score of: %s, and p-value %s."%(zscore,p_val), \
"\tBackground had normal distribution with mean: %s and std. %s" \
%(meanBckgrnd,stdBckgrnd)])
ovrAllogRun.append(messg)
if vrbse:
print messg
#----------------------------
#Return log message and p-values
return ovrAllogRun,p_val
########################################################
#~ Compute full randomization on an array
########################################################
def cmptFullRndmztn(aBckgrndMetrcMskd,aBckgrndRowsPosProb, \
aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt,stdTrgt,lenTrgtRows, \
lenTrgtClms,outPltFl,numRndmztns,maxRndmztns,seedAdd,statstc,vrbse, \
aPosRowsToGns=None,aPosClmnsToGns=None,aBckgrndMirnMetrcMskd=None, \
aTrgtMirnStatstcMskd=None,mirnDtype='cnt',outMirnStstFl=None, \
aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. aBckgrndRowsPosProb is the
probability of all rows in the background. aBckgrndClmnPosProb is
the position probability of all columns in the background.
Optionally aTrgtMetrcMskd is a masked array with metric values of
interest for the target. statstcTrgt is the statistic value of the
target whose probability is going to be calculated from the
randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed
to run the randomizations. statstc is the statistic to sample from
each randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If
aPosRowsToGns and aPosClmnsToGns are not None calculations are going
to be run by gene. aBckgrndMirnMetrcMskd is a masked array (3D
supermatrix) with an array of metric values for each column and row
to be summarized using the value statstc. aBckgrndMirnMetrcMskd is
an array with the miRNA metrics summarized for the background.
aTrgtMirnStatstcMskd is an array with the miRNA metrics for the
target dataset. mirnDtype is the datatype of the miRNA metric:
{'cnt' for counts and 'scr' for scores}. outMirnStstFl is the file
to save the results of the miRNA probability calculation. aMirnNms
is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
	NOTE: Both rows and columns are going to be randomized.
"""
#----------------------------
#Test for inputs
ovrAllogRun = []#holder for log message
fnctn='sf'#Greater or equal, as normal distributed 1-p is less than.
assert aTrgtMetrcMskd is not None or (statstcTrgt and stdTrgt and \
lenTrgtRows and lenTrgtClms)
#mirna inputs
if aBckgrndMirnMetrcMskd is not None or aTrgtMirnStatstcMskd is not \
None or outMirnStstFl is not None or aMirnNms is not None:
assert aBckgrndMirnMetrcMskd is not None and \
aTrgtMirnStatstcMskd is not None and outMirnStstFl is not None \
and aMirnNms is not None
assert mirnDtype in {'cnt','scr'}
try:
raise exceptions.CelleryWarningObjct \
('Randomizations are going to be run for','miRNAs')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
ovrAllogRun.append(mssge)
pass
#gene inputs
if aPosClmnsToGns is not None and aPosRowsToGns is not None:
mthdBckgrndRndmztn = rnClmNRowRndmztnByGns
try:
raise exceptions.CelleryWarningObjct \
('Randomizations are going to be run at the level of','gene')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
ovrAllogRun.append(mssge)
pass
if aTrgtMetrcMskd is not None:
#calculate stat for the target metrics by gene
vctrzdRtrnAvrgMskdArray = vectorize(rtrnAvrgMskdArray, \
excluded={0})
aTrgtMetrcMskdGns = vctrzdRtrnAvrgMskdArray(aTrgtMetrcMskd, \
aPosRowsToGns,aPosClmnsToGns)
aTrgtMetrcMskdGns = ma.masked_invalid(aTrgtMetrcMskdGns)
statstcTrgt = getattr(ma,statstc)(aTrgtMetrcMskdGns)
stdTrgt = ma.std(aTrgtMetrcMskdGns)
lenTrgtRows,lenTrgtClms = aTrgtMetrcMskd.shape
else:
assert statstcTrgt and stdTrgt and lenTrgtRows and \
lenTrgtClms
		if aTrgtMirnStatstcMskd is not None:
			#number of miRNAs, assuming a rows x columns x miRNAs supermatrix
			numMrns = aTrgtMirnStatstcMskd.shape[2]
			rtrnRndmBckgrndMirnMetrcPrGn = \
			vectorize(rtrnStstMskdMirnArray,excluded={0})
			aTrgtMirnStatstcMskd = \
			rtrnRndmBckgrndMirnMetrcPrGn(aTrgtMirnStatstcMskd, \
			aPosRowsToGns,aPosClmnsToGns,numMrns)
else:
mthdBckgrndRndmztn = rnClmNRowRndmztn#full randomz. method.
if statstcTrgt or stdTrgt or lenTrgtRows or lenTrgtClms:
assert statstcTrgt and stdTrgt and lenTrgtRows and \
lenTrgtClms
else:
statstcTrgt = getattr(ma,statstc)(ma.masked_invalid \
(aTrgtMetrcMskd))
stdTrgt = ma.std(ma.masked_invalid(aTrgtMetrcMskd))
lenTrgtRows,lenTrgtClms = aTrgtMetrcMskd.shape
#----------------------------
#Report variables to use
if seedAdd:
seedMssg = '\tSeed: %s'%seedAdd
else:
seedMssg = ''
messg= '\n'.join([
'-----------------------------------------------------------', \
'A full randomization is going to be conducted with parameters:', \
'\tTarget %s: %s, and standard error: %s'%(statstc,statstcTrgt, \
stdTrgt), \
'\t%s columns and %s rows are going to be sampled in %s randomizations' \
%(lenTrgtClms,lenTrgtRows,numRndmztns), \
"\tand significance of target's %s is going to be determined."% \
statstc, \
seedMssg, \
'-----------------------------------------------------------'])
ovrAllogRun.append(messg)
if vrbse:
print messg
#----------------------------
#Run randomizations
p_val,zscore,meanBckgrnd,stdBckgrnd,logRun = rndmzCore \
(aBckgrndMetrcMskd,mthdBckgrndRndmztn,statstcTrgt,stdTrgt,outPltFl, \
aBckgrndClmnPosProb,aBckgrndRowsPosProb,lenTrgtClms,lenTrgtRows, \
seedAdd,numRndmztns,maxRndmztns,statstc,fnctn,vrbse,aPosClmnsToGns, \
aPosRowsToGns,aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype, \
outMirnStstFl,aMirnNms)
#----------------------------
#Set log out
	if p_val<=0.01:
		signfcn = 'in the top 1% of the values in the randomized'
	elif p_val<=0.05:
		signfcn = 'in the top 5% of the values in the randomized'
	elif p_val>=0.99:
		signfcn = 'in the bottom 1% of the values in the randomized'
		p_val = 1-p_val#p-val in the bottom
	elif p_val>=0.95:
		signfcn = 'in the bottom 5% of the values in the randomized'
		p_val = 1-p_val#p-val in the bottom
	else:
		signfcn = 'to be comparable with the values in the randomized'
ovrAllogRun.append(logRun)
messg = '\n'.join([
'-----------------------------------------------------------', \
'\t%s randomizations finished!'%numRndmztns, \
"\tTarget's %s was found %s background"%(statstc,signfcn), \
"\twith z-score of: %s, and p-value %s."%(zscore,p_val), \
"\tBackground had normal distribution with mean: %s and std. %s" \
%(meanBckgrnd,stdBckgrnd)])
ovrAllogRun.append(messg)
if vrbse:
print messg
#----------------------------
#Return log message and p-values
return ovrAllogRun,p_val
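########################################################
#~ Usage sketch (added, illustrative only) for cmptFullRndmztn
########################################################
#The helper below is not part of the original module; it is a minimal
#sketch of how cmptFullRndmztn might be called on toy data. The array
#sizes, the uniform sampling probabilities, the seed (3) and the output
#file name 'rndmztn_sktch.eps' are assumptions made for illustration,
#and the sketch assumes the module's remaining helpers (e.g. rndmzCore)
#complete as documented. It is defined but never called at import time.
def _sktchCmptFullRndmztn():
	"""
	Return the run log and p-value for a toy full randomization.
	"""
	#toy background (200 x 150) and target (40 x 30) metric matrices
	aBckgrnd = ma.masked_invalid(random.rand(200,150))
	aTrgt = ma.masked_invalid(random.rand(40,30))
	#uniform sampling probabilities for background rows and columns
	aRowProb = zeros(200)
	aRowProb.fill(1/200.0)
	aClmnProb = zeros(150)
	aClmnProb.fill(1/150.0)
	logMssg,p_val = cmptFullRndmztn(aBckgrnd,aRowProb,aClmnProb,aTrgt, \
	statstcTrgt=False,stdTrgt=False,lenTrgtRows=False,lenTrgtClms=False, \
	outPltFl='rndmztn_sktch.eps',numRndmztns=100,maxRndmztns=10, \
	seedAdd=3,statstc='mean',vrbse=False)
	return logMssg,p_val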
########################################################
#~ Compute randomization on only rows
########################################################
def cmptRowRndmztn(aBckgrndMetrcMskd,aBckgrndRowsPosProb, \
aBckgrndPosClmns,aTrgtMetrcMskd=None,statstcTrgt=False,stdTrgt=False, \
lenTrgtRows=False,lenTrgtClms=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosRowsToGns=None,aPosClmnsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None,mirnDtype='cnt', \
outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. aBckgrndRowsPosProb is the
probability of all rows in the background. aBckgrndPosClmns is the
position of the columns to be selected from the background.
Optionally, aTrgtMetrcMskd is a masked array with metric values of
interest for the target. statstcTrgt is the statistic value of the
target whose probability is going to be calculated from the
randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed
to run the randomizations. statstc is the statistic to sample from
each randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If
aPosRowsToGns and aPosClmnsToGns are not None calculations are going
to be run by gene. aBckgrndMirnMetrcMskd is a masked array (3D
supermatrix) with an array of metric values for each column and row
to be summarized using the value statstc. aBckgrndMirnMetrcMskd is
an array with the miRNA metrics summarized for the background.
aTrgtMirnStatstcMskd is an array with the miRNA metrics for the
target dataset. mirnDtype is the datatype of the miRNA metric:
{'cnt' for counts and 'scr' for scores}. outMirnStstFl is the file
to save the results of the miRNA probability calculation. aMirnNms
is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
	NOTE: Only rows are going to be randomized.
NOTE: Positions in aBckgrndPosClmns are going to be obtained in the
same order for every randomization.
"""
#----------------------------
#Test for inputs
ovrAllogRun = []#holder for log message
fnctn='sf'#Greater or equal, as normal distributed 1-p is less than.
assert aTrgtMetrcMskd is not None or (statstcTrgt and stdTrgt and \
lenTrgtRows)
	assert not lenTrgtClms#no length target for columns
#mirna inputs
if aBckgrndMirnMetrcMskd is not None or aTrgtMirnStatstcMskd is not \
None or outMirnStstFl is not None or aMirnNms is not None:
assert aBckgrndMirnMetrcMskd is not None and \
aTrgtMirnStatstcMskd is not None and outMirnStstFl is not None \
and aMirnNms is not None
assert mirnDtype in {'cnt','scr'}
try:
raise exceptions.CelleryWarningObjct \
('Randomizations are going to be run for','miRNAs')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
ovrAllogRun.append(mssge)
pass
#gene inputs
if aPosClmnsToGns is not None and aPosRowsToGns is not None:
mthdBckgrndRndmztn = rnRowRndmztnByGns
try:
raise exceptions.CelleryWarningObjct \
('Randomizations are going to be run at the level of','gene')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
ovrAllogRun.append(mssge)
pass
if aTrgtMetrcMskd is not None:
#calculate stat for the target metrics by gene
vctrzdRtrnAvrgMskdArray=vectorize(rtrnAvrgMskdArray, \
excluded={0})
aTrgtMetrcMskdGns = vctrzdRtrnAvrgMskdArray(aTrgtMetrcMskd, \
aPosRowsToGns,aPosClmnsToGns)
aTrgtMetrcMskdGns = ma.masked_invalid(aTrgtMetrcMskdGns)
statstcTrgt = getattr(ma,statstc)(aTrgtMetrcMskdGns)
stdTrgt = ma.std(aTrgtMetrcMskdGns)
lenTrgtRows = aTrgtMetrcMskd.shape[0]
else:
assert statstcTrgt and stdTrgt and lenTrgtRows
		if aTrgtMirnStatstcMskd is not None:
			#number of miRNAs, assuming a rows x columns x miRNAs supermatrix
			numMrns = aTrgtMirnStatstcMskd.shape[2]
			rtrnRndmBckgrndMirnMetrcPrGn = \
			vectorize(rtrnStstMskdMirnArray,excluded={0})
			aTrgtMirnStatstcMskd = \
			rtrnRndmBckgrndMirnMetrcPrGn(aTrgtMirnStatstcMskd, \
			aPosRowsToGns,aPosClmnsToGns,numMrns)
else:
mthdBckgrndRndmztn = rnRowRndmztn#only randomize rows
if statstcTrgt or stdTrgt or lenTrgtRows:
assert statstcTrgt and stdTrgt and lenTrgtRows
else:
statstcTrgt = getattr(ma,statstc)(ma.masked_invalid \
(aTrgtMetrcMskd))
stdTrgt = ma.std(ma.masked_invalid(aTrgtMetrcMskd))
lenTrgtRows = aTrgtMetrcMskd.shape[0]
#----------------------------
#Report variables to use
if seedAdd:
seedMssg = '\tSeed: %s'%seedAdd
else:
seedMssg = ''
messg= '\n'.join([
'-----------------------------------------------------------', \
'A randomization of rows is going to be conducted with parameters:', \
'\tTarget %s: %s, and standard error: %s'%(statstc,statstcTrgt, \
stdTrgt), \
'\t%s rows are going to be sampled in %s randomizations' \
%(lenTrgtRows,numRndmztns), \
"\tand significance of target's %s is going to be determined."% \
statstc, \
seedMssg, \
'-----------------------------------------------------------'])
ovrAllogRun.append(messg)
if vrbse:
print messg
#----------------------------
#Run randomizations
p_val,zscore,meanBckgrnd,stdBckgrnd,logRun = rndmzCore \
(aBckgrndMetrcMskd,mthdBckgrndRndmztn,statstcTrgt,stdTrgt,outPltFl, \
aBckgrndPosClmns,aBckgrndRowsPosProb,lenTrgtClms,lenTrgtRows, \
seedAdd,numRndmztns,maxRndmztns,statstc,fnctn,vrbse,aPosClmnsToGns, \
aPosRowsToGns,aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype, \
outMirnStstFl,aMirnNms)
#----------------------------
#Set log out
	if p_val<=0.01:
		signfcn = 'in the top 1% of the values in the randomized'
	elif p_val<=0.05:
		signfcn = 'in the top 5% of the values in the randomized'
	elif p_val>=0.99:
		signfcn = 'in the bottom 1% of the values in the randomized'
		p_val = 1-p_val#p-val in the bottom
	elif p_val>=0.95:
		signfcn = 'in the bottom 5% of the values in the randomized'
		p_val = 1-p_val#p-val in the bottom
	else:
		signfcn = 'to be comparable with the values in the randomized'
ovrAllogRun.append(logRun)
messg = '\n'.join([
'-----------------------------------------------------------', \
'\t%s randomizations finished!'%numRndmztns, \
"\tTarget's %s was found %s background"%(statstc,signfcn), \
"\twith z-score of: %s, and p-value %s."%(zscore,p_val), \
"\tBackground had normal distribution with mean: %s and std. %s" \
%(meanBckgrnd,stdBckgrnd)])
ovrAllogRun.append(messg)
if vrbse:
print messg
#----------------------------
#Return log message and p-values
return ovrAllogRun,p_val
########################################################
#~ Calculate a statistic on miRNA metrics for a pair of genes/lncrnas.
########################################################
def cmptMirnaStatRowClmnPrs(aRowsMirnMetrc,aClmnsMirnMetrc, \
aRowsClmnsMsk=None,mirnPrsStstc='add',dtype='cnt'):
"""
Input: aRowsMirnMetrc is an array of miRNA metric values arrays for
each row. aClmnsMirnMetrc is an array of miRNA metric values arrays
for each column. Optionally, aRowsClmnsMsk is an array of size
	len(aRowsMirnMetrc) x len(aClmnsMirnMetrc) x number of miRNAs to mask the output array.
mirnPrsStstc is the statistic to calculate values for miRNAs. dtype
is the datatype of the miRNA metric: {'cnt' for counts and 'scr'
for scores}.
Output: aMirnMetrcMskd is an output array with stats for the metrics
of shared miRNAs for each masked row-column pair.
NOTE: row-column pairs masked in aRowsClmnsMsk are retrieved but
they are masked.
NOTE: non-shared miRNAs have values of nan.
"""
#----------------------------
#Test input variables
lenRows,lenMirnsRows = aRowsMirnMetrc.shape
lenClmns,lenMirnsClmns = aClmnsMirnMetrc.shape
assert lenMirnsRows==lenMirnsClmns
assert dtype in {'cnt','scr'}
if aRowsClmnsMsk is not None:
lenRowsMsk,lenClmnsMsk,lenMirnsMsk = aRowsClmnsMsk.shape
assert lenRows==lenRowsMsk and lenClmnsMsk==lenClmns and \
lenMirnsMsk==lenMirnsRows
else:#make an empty mask
aRowsClmnsMsk = zeros((lenRows,lenClmns,lenMirnsRows))
if dtype=='scr':
aRowsMirnMetrcMskd = ma.masked_invalid(aRowsMirnMetrc)
aClmnsMirnMetrcMskd = ma.masked_invalid(aClmnsMirnMetrc)
else:
aRowsMirnMetrcMskd = ma.masked_less(aRowsMirnMetrc,1)
aClmnsMirnMetrcMskd = ma.masked_less(aClmnsMirnMetrc,1)
#----------------------------
#Calculate metric for each miRNA in each row-column pair
aMirnMetrc = ma.zeros((lenRows,lenClmns,lenMirnsRows),dtype=float32)
for rowPos in xrange(lenRows):
for clmnPos in xrange(lenClmns):
aMirnMetrc[rowPos][clmnPos] = getattr(ma,mirnPrsStstc) \
(aRowsMirnMetrcMskd[rowPos],aClmnsMirnMetrcMskd[clmnPos])
aMirnMetrc.fill_value = nan
#----------------------------
#Mask the output stats array
aMirnMetrcMskd = ma.array(aMirnMetrc.filled(),mask=aRowsClmnsMsk)
return aMirnMetrcMskd
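########################################################
#~ Usage sketch (added, illustrative only) for cmptMirnaStatRowClmnPrs
########################################################
#The helper below is not part of the original module; it is a minimal
#sketch of cmptMirnaStatRowClmnPrs on toy miRNA count arrays. The two
#sequences per axis and the three miRNAs are assumptions made purely
#for illustration. It is defined but never called at import time.
def _sktchCmptMirnaStatRowClmnPrs():
	"""
	Return the 2 x 2 x 3 per-pair miRNA statistic for toy counts.
	"""
	#counts of 3 miRNAs for 2 row sequences and 2 column sequences
	aRowsMirnMetrc = array([[2,0,1],[0,3,0]],dtype=float32)
	aClmnsMirnMetrc = array([[1,1,0],[0,0,4]],dtype=float32)
	#'add' sums the counts of each miRNA shared by a row-column pair;
	#miRNAs absent (count<1) in either member come back as nan
	aMirnMetrcMskd = cmptMirnaStatRowClmnPrs(aRowsMirnMetrc, \
	aClmnsMirnMetrc,mirnPrsStstc='add',dtype='cnt')
	return aMirnMetrcMskd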
########################################################
#~ Plot data distribution given a background
########################################################
def mkPlt(statstcTrgt,aBcrkngdStatstc,outPltFl):
"""
Input: statstcTrgt is the target statistic value. aBcrkngdStatstc is
an array of statistic values for the background sampling. outPltFl
is the output plot file.
Output: outPltFl is the output plot file with the distribution of
the background indicating the position of the target.
"""
fig = plt.figure()
xVls,yVls,info = plt.hist(aBcrkngdStatstc,100,normed=True)
plt.xlabel('Mean ECP value')
plt.ylabel('Number of replicates')
plt.annotate('Target (%s)'%statstcTrgt, xy=(statstcTrgt, max(xVls)), \
xytext = (statstcTrgt, max(xVls)+10),arrowprops=dict(facecolor='red', \
shrink = 0.05, width=2))
plt.gcf().subplots_adjust(bottom=0.15)
plt.savefig(outPltFl,bbox_inches='tight',format='eps')
plt.close()
return 0
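########################################################
#~ Usage sketch (added, illustrative only) for mkPlt
########################################################
#The helper below is not part of the original module; it is a minimal
#sketch of mkPlt: histogram a toy normal background and mark an
#arbitrary target value (0.8). The sample size, the target value and
#the output file name 'bckgrnd_sktch.eps' are assumptions made for
#illustration, and the sketch assumes the older matplotlib API that
#mkPlt itself relies on (normed=True in plt.hist) is available. It is
#defined but never called at import time.
def _sktchMkPlt():
	"""
	Write a toy background histogram with the target annotated.
	"""
	aBcrkngdStatstc = random.normal(0.5,0.1,1000)#toy background means
	return mkPlt(0.8,aBcrkngdStatstc,'bckgrnd_sktch.eps')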
########################################################
#~ Process miRNA randomization results
########################################################
def procMirnRndmztn(aBcrkngdMirnStatstc,aTrgtMirnStatstcMskd,mirnDtype, \
outMirnStstFl,aMirnNms,fnctn='sf'):
"""
Input: aBcrkngdMirnStatstc is an array with the miRNA metrics
summarized for the background. aTrgtMirnStatstcMskd is an array with
the miRNA metrics for the target dataset. mirnDtype is the datatype
of the miRNA metric: {'cnt' for counts and 'scr' for scores}.
outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names. fnctn
is the statistical function to calculate the probability {'sf' is
greater or equal than}.
Output: mssge is the log text of the probability calculation
results.
NOTE: Results are going to be written in the outMirnStstFl file.
NOTE: aBcrkngdMirnStatstc is an unmasked array and
aTrgtMirnStatstcMskd is masked.
"""
#----------------------------
#Calculate statistic for target dataset and set initial parameters
aTrgtMirnStatstcMskdFlld = rtrnMirnStat(aTrgtMirnStatstcMskd)
aTrgtMirnMskdAvrg = rtrnMirnStat(aTrgtMirnStatstcMskd,'mean')
aTrgtMirnMskdStd = rtrnMirnStat(aTrgtMirnStatstcMskd,'std')
aBcrkngdMirnStatstcMskd = ma.masked_invalid(aBcrkngdMirnStatstc)
aBcrkngdMirnStatstcMskdAvrg = rtrnMirnStat(aBcrkngdMirnStatstcMskd, \
'mean')
aBcrkngdMirnStatstcMskdStd = rtrnMirnStat(aBcrkngdMirnStatstcMskd, \
'std')
nReps,nMirns = aBcrkngdMirnStatstcMskd.shape
assert nMirns==len(aTrgtMirnStatstcMskdFlld)==len(aMirnNms)
#----------------------------
#Calculate significance of miRNA scores/counts using z-score
aMirPval = zeros(nMirns,dtype=float32)
aMirZscore = zeros(nMirns,dtype=float32)
aMirPval.fill(nan)
aMirZscore.fill(nan)
for mirnPos in xrange(nMirns):
mirTrgtStat = aTrgtMirnStatstcMskdFlld[mirnPos]
mirBckgrndStat = aBcrkngdMirnStatstcMskd[:,mirnPos]
mirZscore = mstats.zmap(mirTrgtStat,mirBckgrndStat)
aMirZscore[mirnPos] = mirZscore
#equal or greater than if 'sf'
aMirPval[mirnPos] = getattr(norm,fnctn)(mirZscore)
aMirQvalGrtrEql = statistics.bh_qvalues(aMirPval)#FDR values gtrEql
aMirQvalLssEql = statistics.bh_qvalues(1-aMirPval)#FDR values lessEql
#----------------------------
#Calculate significance of miRNA counts using exact binomial
if mirnDtype=='cnt':
#number of trials
N = ma.sum(aTrgtMirnStatstcMskd).__float__()
N = float32(N)
#probability of sucess single trial
aMirnP_sngl = ma.sum(aBcrkngdMirnStatstcMskd,axis=0)/ma.sum \
(aBcrkngdMirnStatstcMskd)
aMirnP_sngl = aMirnP_sngl.filled()
#equal or greater than
aMirnCntPvalGrtrEql = binom.sf(aTrgtMirnStatstcMskdFlld-1,N, \
aMirnP_sngl)
#equal or less than
aMirnCntPvalLssEql = 1-binom.sf(aTrgtMirnStatstcMskdFlld,N, \
aMirnP_sngl)
aMirnCntQvalGrtrEql = statistics.bh_qvalues(aMirnCntPvalGrtrEql)
aMirnCntQvalLssEql = statistics.bh_qvalues(aMirnCntPvalLssEql)
#----------------------------
#Format output
cntSngfMoreZscore = 0
cntSngfLssZscore = 0
if mirnDtype=='cnt':
cntSngfLssBnml = 0
cntSngfMoreBnml = 0
outRslts = ['\t'.join(['name','target_average','target_std', \
'target_statistic','background_statistic_average', \
'background_statistic_std','target_statistic_k2', \
'target_statistic_k-wPval','z-score', \
'p-val (target_statistic >= background_statistic)', \
'q-val (target_statistic >= background_statistic)', \
'p-val (target_statistic =< background_statistic)', \
'q-val (target_statistic =< background_statistic)', \
'significance (target_statistic >= background_statistic)', \
'significance (target_statistic =< background_statistic)', \
'N (binomial test parameter)','k (observations)','p sucess', \
'p-val (target_counts >= background_counts)', \
'q-val (target_counts >= background_counts)', \
'p-val (target_counts =< background_counts)', \
'q-val (target_counts =< background_counts)', \
'significance (target_counts >= background_counts)', \
'significance (target_counts =< background_counts)'])]
else:
outRslts = ['\t'.join(['name','target_average','target_std', \
'target_statistic','background_statistic_average', \
'background_statistic_std','target_statistic_k2', \
'target_statistic_k-wPval','z-score', \
'p-val (target_statistic >= background_statistic)', \
'q-val (target_statistic >= background_statistic)', \
'p-val (target_statistic =< background_statistic)', \
'q-val (target_statistic =< background_statistic)', \
'significance (target_statistic >= background_statistic)', \
'significance (target_statistic =< background_statistic)'])]
for mirnPos in xrange(nMirns):
mirnNm = aMirnNms[mirnPos]
mirTrgtArvg = aTrgtMirnMskdAvrg[mirnPos]
mirTrgtStd = aTrgtMirnMskdStd[mirnPos]
mirTrgtStat = aTrgtMirnStatstcMskdFlld[mirnPos]
mirBckgrndStat = aBcrkngdMirnStatstcMskd[:,mirnPos]
mirBckgrndStatArvg = aBcrkngdMirnStatstcMskdAvrg[mirnPos]
mirBckgrndStatStd = aBcrkngdMirnStatstcMskdStd[mirnPos]
mirK2,aMirKWPval = mstats.normaltest(mirBckgrndStat)
mirZscore = aMirZscore[mirnPos]
mirPvalGrtrEql = aMirPval[mirnPos]
mirQvalGrtrEql = aMirQvalGrtrEql[mirnPos]
signfcGrtrEql = ''
if mirQvalGrtrEql<=0.05:
signfcGrtrEql+='*'
cntSngfMoreZscore+=1
if mirQvalGrtrEql<=0.01:
signfcGrtrEql+='*'
mirPvalLssEql = 1-mirPvalGrtrEql
mirQvalLssEql = aMirQvalLssEql[mirnPos]
signfcLssEql = ''
if mirQvalLssEql<=0.05:
signfcLssEql+='*'
cntSngfLssZscore+=1
if mirQvalLssEql<=0.01:
signfcLssEql+='*'
if mirnDtype=='cnt':
signfcCntGrtrEql = ''
signfcCntLssEql = ''
mirN = N
mirK = aTrgtMirnStatstcMskdFlld[mirnPos]
mirPsuccs = aMirnP_sngl[mirnPos]
mirPCntGrtrEql = aMirnCntPvalGrtrEql[mirnPos]
mirQCntGrtrEql = aMirnCntQvalGrtrEql[mirnPos]
if mirQCntGrtrEql<=0.05:
signfcCntGrtrEql+='*'
cntSngfMoreBnml+=1
if mirQCntGrtrEql<=0.01:
signfcCntGrtrEql+='*'
mirPCntLssEql = aMirnCntPvalLssEql[mirnPos]
mirQCntLssEql = aMirnCntQvalLssEql[mirnPos]
if mirQCntLssEql<=0.05:
signfcCntLssEql+='*'
cntSngfLssBnml+=1
if mirQCntLssEql<=0.01:
signfcCntLssEql+='*'
outRslts.append('\t'.join([str(v) for v in mirnNm, \
mirTrgtArvg,mirTrgtStd,mirTrgtStat,mirBckgrndStatArvg, \
mirBckgrndStatStd,mirK2,aMirKWPval,mirZscore,mirPvalGrtrEql, \
mirQvalGrtrEql, mirPvalLssEql,mirQvalLssEql,signfcGrtrEql, \
signfcLssEql,mirN,mirK,mirPsuccs,mirPCntGrtrEql, \
mirQCntGrtrEql,mirPCntLssEql,mirQCntLssEql,signfcCntGrtrEql, \
signfcCntLssEql]))
else:
outRslts.append('\t'.join([str(v) for v in mirnNm, \
mirTrgtArvg,mirTrgtStd,mirTrgtStat,mirBckgrndStatArvg, \
mirBckgrndStatStd,mirK2,aMirKWPval,mirZscore,mirPvalGrtrEql, \
mirQvalGrtrEql, mirPvalLssEql,mirQvalLssEql,signfcGrtrEql, \
signfcLssEql]))
#----------------------------
#Write output file
outFl = open(outMirnStstFl,'w')
outFl.write('\n'.join(outRslts))
outFl.close()
#----------------------------
#Make output message
if mirnDtype == 'cnt':
mthds = 'z-score and binomial probability'
sgnfcntRprt = \
'%s miRNAs for z-score and %s for binomial probability'% \
(cntSngfMoreZscore,cntSngfMoreBnml)
else:
mthds = 'z-score'
sgnfcntRprt = '%s miRNAs for z-score'%cntSngfMoreZscore
mssge = '\n'.join([
'\t-----------------------------------------------------------',
'\t The probabilities for independent miRNAs were calculated',
'\t using: %s.'%mthds,
'\t Results were written in the file: %s'%outMirnStstFl,
'\t %s were found to be significant (q<0.05)'%sgnfcntRprt,
'\t-----------------------------------------------------------'])
return mssge
########################################################
#~ Run randomization on (both) rows and columns
########################################################
def rnClmNRowRndmztn(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aPosProbClmns,aPosProbRows,lenTrgtClms=False,lenTrgtRows=False, \
aPosClmnsToGns=None,aPosRowsToGns=None,aBckgrndMirnMetrcMskd=None, \
vrbse=True):
"""
Input: aBckgrndMetrcMskd is a 2x2 masked array with a metric of
interest in each cell and rows and columns in the same order as the
other input files. numRndmztns is the number of randomizations.
statstc is the statistic to calculate from the background samples.
seedAdd is the seed. aPosProbClmns is an array with the
probabilities of sampling the positions in columns. aPosProbRows is
an array with the probabilities of sampling the positions in rows.
	lenTrgtClms is the size of the column samples to take (normally
	the same as the target). lenTrgtRows is the size of the samples to
	take from the rows (normally the same as the target).
aPosClmnsToGns and aPosRowsToGns are always None. Optionally,
aBckgrndMirnMetrcMskd is a masked array (3D supermatrix) with an
array of metric values for each column and row to be summarized
using the value statstc.
Output: aBcrkngdStatstc is the statistic value for each
randomization of columns and rows (of size lenTrgtClms and
	lenTrgtRows respectively) in numRndmztns. aBcrkngdMirnStatstc is an
	array with the summarized miRNA metrics; it is None if
	aBckgrndMirnMetrcMskd is None.
"""
aBcrkngdStatstc = zeros(numRndmztns,dtype=float32)
aBcrkngdStatstc.fill(nan)
lenAPosProbClmns = len(aPosProbClmns)
lenAPosProbRows = len(aPosProbRows)
if aBckgrndMirnMetrcMskd is not None:
nRows,nClmns,numMrns = aBckgrndMirnMetrcMskd.shape
assert nRows==lenAPosProbRows and nClmns==lenAPosProbClmns
aBcrkngdMirnStatstc = zeros((numRndmztns,numMrns),dtype=float32)
aBcrkngdMirnStatstc.fill(nan)
else:
aBcrkngdMirnStatstc = None
#----------------------------
#Run randomization
for smpl in xrange(numRndmztns):
if vrbse and smpl%500 == 0:
print '\t...Running randomization %s out of %s'%(smpl, \
numRndmztns)
aPosClmns = xrange(lenAPosProbClmns)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosClmsRndm = random.choice(aPosClmns,lenTrgtClms, \
p=aPosProbClmns)
#
aPosRows = xrange(lenAPosProbRows)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosRowsRndm = random.choice(aPosRows,lenTrgtRows, \
p=aPosProbRows)
aRndmBckgrndMetrc = aBckgrndMetrcMskd[:,aPosClmsRndm]
aRndmBckgrndMetrc = aRndmBckgrndMetrc[aPosRowsRndm,:]
aRndmBckgrndMetrc = ma.masked_invalid(aRndmBckgrndMetrc)
aBcrkngdStatstc[smpl] = getattr(ma,statstc)(aRndmBckgrndMetrc)
#----------------------------
#Run randomization miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
aRndmBckgrndMirnMtrc = aBckgrndMirnMetrcMskd[:,aPosClmsRndm]
aRndmBckgrndMirnMtrc = aRndmBckgrndMirnMtrc[aPosRowsRndm,:]
aRndmBckgrndMirnMtrc.fill_value = nan
aRndmBckgrndMirnMtrc = ma.masked_invalid \
(aRndmBckgrndMirnMtrc.filled())
aBcrkngdMirnStatstc[smpl]=rtrnMirnStat(aRndmBckgrndMirnMtrc)
return aBcrkngdStatstc,aBcrkngdMirnStatstc
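########################################################
#~ Usage sketch (added, illustrative only) for rnClmNRowRndmztn
########################################################
#The helper below is not part of the original module; it is a minimal
#sketch of rnClmNRowRndmztn: 10 randomizations sampling 20 rows and 15
#columns with uniform probabilities from a 100 x 80 toy background.
#All sizes and the seed (3) are assumptions made for illustration. It
#is defined but never called at import time.
def _sktchRnClmNRowRndmztn():
	"""
	Return one background mean per toy randomization.
	"""
	aBckgrndMetrcMskd = ma.masked_invalid(random.rand(100,80))
	aPosProbRows = zeros(100)
	aPosProbRows.fill(1/100.0)
	aPosProbClmns = zeros(80)
	aPosProbClmns.fill(1/80.0)
	aBcrkngdStatstc,aBcrkngdMirnStatstc = rnClmNRowRndmztn \
	(aBckgrndMetrcMskd,10,'mean',3,aPosProbClmns,aPosProbRows, \
	lenTrgtClms=15,lenTrgtRows=20,vrbse=False)
	return aBcrkngdStatstc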
########################################################
#~ Run randomization on (both) rows and columns and average by gene
########################################################
def rnClmNRowRndmztnByGns(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aPosProbClmns,aPosProbRows,lenTrgtClms,lenTrgtRows, \
aPosClmnsToGns,aPosRowsToGns,aBckgrndMirnMetrcMskd=None,vrbse=True):
"""
Input: aBckgrndMetrcMskd is a 2x2 masked array with a metric of
interest in each cell and rows and columns in the same order as the
other input files. numRndmztns is the number of randomizations.
statstc is the statistic to calculate from the background samples.
seedAdd is the seed. aPosProbClmns is an array with the
probabilities of sampling the positions in columns. aPosProbRows is
an array with the probabilities of sampling the positions in rows.
	lenTrgtClms is the size of the column samples to take (normally
	the same as the target). lenTrgtRows is the size of the samples to
	take from the rows (normally the same as the target). aPosClmnsToGns
is an array of position of gene to which each column in
aBckgrndMetrcMskd and aPosProbClmns is mapped. aPosRowsToGns is an
array of position of gene to which each row in aBckgrndMetrcMskd and
aPosProbRows is mapped. Optionally, aBckgrndMirnMetrcMskd is a
masked array (3D supermatrix) with an array of metric values for
each column and row to be summarized using the value statstc.
Output: aBcrkngdStatstc is the statistic value for each
randomization of columns and rows (of size lenTrgtClms and
	lenTrgtRows respectively) in numRndmztns. aBcrkngdMirnStatstc is an
	array with the summarized miRNA metrics; it is None if
	aBckgrndMirnMetrcMskd is None.
"""
aBcrkngdStatstc = zeros(numRndmztns,dtype=float32)
aBcrkngdStatstc.fill(nan)
lenAPosProbClmns = len(aPosProbClmns)
lenAPosProbRows = len(aPosProbRows)
#vectorize the function
rtrnRndmBckgrndMetrcPrGn =vectorize(rtrnAvrgMskdArray,excluded={0})
	#assuming 0-based gene positions, the per-gene holders need max(...)+1 slots
	lenClmnsToGns = max(aPosClmnsToGns)+1
	lenRowsToGns = max(aPosRowsToGns)+1
#to randomize miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
nRows,nClmns,numMrns = aBckgrndMirnMetrcMskd.shape
assert nRows==lenAPosProbRows and nClmns==lenAPosProbClmns
aBcrkngdMirnStatstc = zeros((numRndmztns,numMrns),dtype=float32)
aBcrkngdMirnStatstc.fill(nan)
rtrnRndmBckgrndMirnMetrcPrGn = \
vectorize(rtrnStstMskdMirnArray,excluded={0})
else:
aBcrkngdMirnStatstc = None
#----------------------------
#Run randomization
for smpl in xrange(numRndmztns):
if vrbse and smpl%500 == 0:
print '\t...Running randomization %s out of %s'%(smpl, \
numRndmztns)
aPosClmns = xrange(lenAPosProbClmns)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
		#per-gene holders of sampled positions (lists, so .append works;
		#assumes rtrnAvrgMskdArray accepts a list of positions per gene)
		aPosClmsRndmGns = [[] for _ in xrange(lenClmnsToGns)]
		aPosRowsRndmGns = [[] for _ in xrange(lenRowsToGns)]
#~
aPosClmsRndm = random.choice(aPosClmns,lenTrgtClms, \
p=aPosProbClmns)
for rltvPos,absPos in enumerate(aPosClmsRndm):
aPosClmsRndmGns[aPosClmnsToGns[absPos]].append(rltvPos)
#
aPosRows = xrange(lenAPosProbRows)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosRowsRndm = random.choice(aPosRows,lenTrgtRows, \
p=aPosProbRows)
for rltvPos,absPos in enumerate(aPosRowsRndm):
aPosRowsRndmGns[aPosRowsToGns[absPos]].append(rltvPos)
#
aRndmBckgrndMetrc = rtrnRndmBckgrndMetrcPrGn(aBckgrndMetrcMskd, \
aPosRowsRndmGns,aPosClmsRndmGns)
aRndmBckgrndMetrc = ma.masked_invalid(aRndmBckgrndMetrc)
aBcrkngdStatstc[smpl] = getattr(ma,statstc)(aRndmBckgrndMetrc)
#----------------------------
#Run randomization miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
aRndmBckgrndMirnMtrc = rtrnRndmBckgrndMirnMetrcPrGn \
(aBckgrndMirnMetrcMskd,aPosRowsRndmGns,aPosClmsRndmGns, \
numMrns)
aRndmBckgrndMirnMtrc=ma.masked_invalid(aRndmBckgrndMirnMtrc)
aBcrkngdMirnStatstc[smpl]=rtrnMirnStat(aRndmBckgrndMirnMtrc)
return aBcrkngdStatstc,aBcrkngdMirnStatstc
########################################################
#~ Run randomization on columns
########################################################
def rnClmRndmztn(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aPosProbClmns=None,aPosRows=None,lenTrgtClms=False, \
lenTrgtRows=False,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,vrbse=True):
"""
Input: aBckgrndMetrcMskd is a 2x2 masked array with a metric of
interest in each cell and rows and columns in the same order as the
other input files. numRndmztns is the number of randomizations.
statstc is the statistic to calculate from the background samples.
seedAdd is the seed. aPosRows is an array of positions in rows to be
sampled. aPosProbClmns is an array with the probabilities of
sampling the positions in columns. lenTrgtClms is the size of the
	samples to take from the columns (normally the same as the target).
aPosClmnsToGns and aPosRowsToGns are always None. Optionally,
aBckgrndMirnMetrcMskd is a masked array (3D supermatrix) with an
array of metric values for each column and row to be summarized
using the value statstc.
Output: aBcrkngdStatstc is the statistic value for each
randomization of columns (of size lenTrgtClms) and values in
	aPosRows for numRndmztns randomizations. aBcrkngdMirnStatstc is an
	array with the summarized miRNA metrics; it is None if
	aBckgrndMirnMetrcMskd is None.
"""
aBcrkngdStatstc = zeros(numRndmztns,dtype=float32)
aBcrkngdStatstc.fill(nan)
lenAPosProbClmns = len(aPosProbClmns)
	if aPosRows is None:
aPosRows = xrange(aBckgrndMetrcMskd.shape[0])
try:
raise exceptions.CelleryWarningObjct \
('Columns are going to be randomized but no positions were',
'provided for rows, all of them are being included.')
except exceptions.CelleryWarningObjct as err:
print err
pass
if aBckgrndMirnMetrcMskd is not None:
nRows,nClmns,numMrns = aBckgrndMirnMetrcMskd.shape
assert nClmns==lenAPosProbClmns
aBcrkngdMirnStatstc = zeros((numRndmztns,numMrns),dtype=float32)
aBcrkngdMirnStatstc.fill(nan)
else:
aBcrkngdMirnStatstc = None
#----------------------------
#Run randomization
for smpl in xrange(numRndmztns):
if vrbse and smpl%500 == 0:
print '\t...Running randomization %s out of %s'%(smpl, \
numRndmztns)
#
aPosClmns = xrange(lenAPosProbClmns)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosClmsRndm = random.choice(aPosClmns,lenTrgtClms, \
p=aPosProbClmns)
aRndmBckgrndMetrc = aBckgrndMetrcMskd[:,aPosClmsRndm]
aRndmBckgrndMetrc = aRndmBckgrndMetrc[aPosRows,:]
aRndmBckgrndMetrc = ma.masked_invalid(aRndmBckgrndMetrc)
aBcrkngdStatstc[smpl] = getattr(ma,statstc)(aRndmBckgrndMetrc)
#----------------------------
#Run randomization miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
aRndmBckgrndMirnMtrc = aBckgrndMirnMetrcMskd[:,aPosClmsRndm]
aRndmBckgrndMirnMtrc = aRndmBckgrndMirnMtrc[aPosRows,:]
aRndmBckgrndMirnMtrc.fill_value = nan
aRndmBckgrndMirnMtrc = ma.masked_invalid \
(aRndmBckgrndMirnMtrc.filled())
aBcrkngdMirnStatstc[smpl]=rtrnMirnStat(aRndmBckgrndMirnMtrc)
return aBcrkngdStatstc,aBcrkngdMirnStatstc
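########################################################
#~ Usage sketch (added, illustrative only) for rnClmRndmztn
########################################################
#The helper below is not part of the original module; it is a minimal
#sketch of rnClmRndmztn: keep a fixed subset of 30 rows and resample 15
#columns with uniform probabilities from a 100 x 80 toy background, 10
#times. All sizes and the seed (3) are assumptions made for
#illustration. It is defined but never called at import time.
def _sktchRnClmRndmztn():
	"""
	Return one background mean per toy column randomization.
	"""
	aBckgrndMetrcMskd = ma.masked_invalid(random.rand(100,80))
	aPosProbClmns = zeros(80)
	aPosProbClmns.fill(1/80.0)
	aPosRows = range(30)#the first 30 rows are kept in every sample
	aBcrkngdStatstc,aBcrkngdMirnStatstc = rnClmRndmztn \
	(aBckgrndMetrcMskd,10,'mean',3,aPosProbClmns,aPosRows, \
	lenTrgtClms=15,vrbse=False)
	return aBcrkngdStatstc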
########################################################
#~ Run randomization on only columns and average by gene
########################################################
def rnClmRndmztnByGns(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aPosProbClmns=None,aPosRows=None,lenTrgtClms=None, \
lenTrgtRows=None,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,vrbse=True):
"""
Input: aBckgrndMetrcMskd is a 2x2 masked array with a metric of
interest in each cell and rows and columns in the same order as the
other input files. numRndmztns is the number of randomizations.
statstc is the statistic to calculate from the background samples.
seedAdd is the seed. aPosProbClmns is an array with the
	probabilities of sampling the positions in columns. aPosRows is an
	array of positions in rows to be sampled. lenTrgtClms is the size
	of the column samples to take (normally the same as the target).
	lenTrgtRows is the size of the samples to take from the rows
	(normally the same as the target). aPosClmnsToGns is an array of
position of gene to which each column in aBckgrndMetrcMskd and
aPosProbClmns is mapped. aPosRowsToGns is an array of position of
gene to which each row in aBckgrndMetrcMskd and aPosRows is mapped.
Optionally, aBckgrndMirnMetrcMskd is a masked array (3D supermatrix)
with an array of metric values for each column and row to be
summarized using the value statstc.
Output: aBcrkngdStatstc is the statistic value for each
randomization of columns and rows (of size lenTrgtClms and
	lenTrgtRows respectively) in numRndmztns. aBcrkngdMirnStatstc is an
	array with the summarized miRNA metrics; it is None if
	aBckgrndMirnMetrcMskd is None.
"""
aBcrkngdStatstc = zeros(numRndmztns,dtype=float32)
aBcrkngdStatstc.fill(nan)
lenAPosProbClmns = len(aPosProbClmns)
	#assuming 0-based gene positions, the per-gene holders need max(...)+1 slots
	lenClmnsToGns = max(aPosClmnsToGns)+1
	lenRowsToGns = max(aPosRowsToGns)+1
#vectorize the function
rtrnRndmBckgrndMetrcPrGn =vectorize(rtrnAvrgMskdArray,excluded={0})
	if aPosRows is None:
aPosRows = xrange(aBckgrndMetrcMskd.shape[0])
try:
raise exceptions.CelleryWarningObjct \
('Columns are going to be randomized but no positions were',
'provided for rows, all of them are being included.')
except exceptions.CelleryWarningObjct as err:
print err
pass
#to randomize miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
nRows,nClmns,numMrns = aBckgrndMirnMetrcMskd.shape
assert nClmns==lenAPosProbClmns
aBcrkngdMirnStatstc = zeros((numRndmztns,numMrns),dtype=float32)
aBcrkngdMirnStatstc.fill(nan)
rtrnRndmBckgrndMirnMetrcPrGn = \
vectorize(rtrnStstMskdMirnArray,excluded={0})
else:
aBcrkngdMirnStatstc = None
#----------------------------
#Run randomization
for smpl in xrange(numRndmztns):
if vrbse and smpl%500 == 0:
print '\t...Running randomization %s out of %s'%(smpl, \
numRndmztns)
#
		#per-gene holders of sampled positions (lists, so .append works;
		#assumes rtrnAvrgMskdArray accepts a list of positions per gene)
		aPosClmsRndmGns = [[] for _ in xrange(lenClmnsToGns)]
		aPosRowsRndmGns = [[] for _ in xrange(lenRowsToGns)]
#~
aPosClmns = xrange(lenAPosProbClmns)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosClmsRndm = random.choice(aPosClmns,lenTrgtClms, \
p=aPosProbClmns)
for rltvPos,absPos in enumerate(aPosClmsRndm):
aPosClmsRndmGns[aPosClmnsToGns[absPos]].append(rltvPos)
for rltvPos,absPos in enumerate(aPosRows):
aPosRowsRndmGns[aPosRowsToGns[absPos]].append(rltvPos)
#
aRndmBckgrndMetrc = rtrnRndmBckgrndMetrcPrGn(aBckgrndMetrcMskd, \
aPosRowsRndmGns,aPosClmsRndmGns)
aRndmBckgrndMetrc = ma.masked_invalid(aRndmBckgrndMetrc)
aBcrkngdStatstc[smpl] = getattr(ma,statstc)(aRndmBckgrndMetrc)
#----------------------------
#Run randomization miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
aRndmBckgrndMirnMtrc = rtrnRndmBckgrndMirnMetrcPrGn \
(aBckgrndMirnMetrcMskd,aPosRowsRndmGns,aPosClmsRndmGns, \
numMrns)
aRndmBckgrndMirnMtrc=ma.masked_invalid(aRndmBckgrndMirnMtrc)
aBcrkngdMirnStatstc[smpl]=rtrnMirnStat(aRndmBckgrndMirnMtrc)
return aBcrkngdStatstc,aBcrkngdMirnStatstc
########################################################
#~ Run randomization on rows
########################################################
def rnRowRndmztn(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aPosClmns=None,aPosProbRows=None,lenTrgtClms=False, \
lenTrgtRows=False,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,vrbse=True):
"""
Input: aBckgrndMetrcMskd is a 2x2 masked array with a metric of
interest in each cell and rows and columns in the same order as the
other input files. numRndmztns is the number of randomizations.
statstc is the statistic to calculate from the background samples.
seedAdd is the seed. aPosProbRows is an array with the
probabilities of sampling the positions in rows. lenTrgtRows is the
	size of the samples to take from the rows (normally the same as the
	target). aPosClmns is an array of positions in columns to be sampled.
aPosClmnsToGns and aPosRowsToGns are always None. Optionally,
aBckgrndMirnMetrcMskd is a masked array (3D supermatrix) with an
array of metric values for each column and row to be summarized
using the value statstc.
Output: aBcrkngdStatstc is the statistic value for each
randomization of rows (of size lenTrgtRows) and values in aPosClmns
	for numRndmztns randomizations. aBcrkngdMirnStatstc is an array
	with the summarized miRNA metrics; it is None if
	aBckgrndMirnMetrcMskd is None.
"""
aBcrkngdStatstc = zeros(numRndmztns,dtype=float32)
aBcrkngdStatstc.fill(nan)
lenAPosProbRows = len(aPosProbRows)
if aPosClmns is None:
aPosClmns = xrange(aBckgrndMetrcMskd.shape[1])
try:
raise exceptions.CelleryWarningObjct \
('Rows are going to be randomized but no positions were',
'provided for columns, all of them are being included.')
except exceptions.CelleryWarningObjct as err:
print err
pass
if aBckgrndMirnMetrcMskd is not None:
nRows,nClmns,numMrns = aBckgrndMirnMetrcMskd.shape
assert nRows==lenAPosProbRows
aBcrkngdMirnStatstc = zeros((numRndmztns,numMrns),dtype=float32)
aBcrkngdMirnStatstc.fill(nan)
else:
aBcrkngdMirnStatstc = None
#----------------------------
#Run randomization
for smpl in xrange(numRndmztns):
if vrbse and smpl%500 == 0:
print '\t...Running randomization %s out of %s'%(smpl, \
numRndmztns)
#
aPosRows = xrange(lenAPosProbRows)
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosRowsRndm = random.choice(aPosRows,lenTrgtRows, \
p=aPosProbRows)
		aRndmBckgrndMetrc = aBckgrndMetrcMskd[:,aPosClmns]
aRndmBckgrndMetrc = aRndmBckgrndMetrc[aPosRowsRndm,:]
aRndmBckgrndMetrc = ma.masked_invalid(aRndmBckgrndMetrc)
aBcrkngdStatstc[smpl] = getattr(ma,statstc)(aRndmBckgrndMetrc)
#----------------------------
#Run randomization miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
			aRndmBckgrndMirnMtrc = aBckgrndMirnMetrcMskd[:,aPosClmns]
aRndmBckgrndMirnMtrc = aRndmBckgrndMirnMtrc[aPosRowsRndm,:]
aRndmBckgrndMirnMtrc.fill_value = nan
aRndmBckgrndMirnMtrc = ma.masked_invalid \
(aRndmBckgrndMirnMtrc.filled())
aBcrkngdMirnStatstc[smpl]=rtrnMirnStat(aRndmBckgrndMirnMtrc)
return aBcrkngdStatstc,aBcrkngdMirnStatstc
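########################################################
#~ Usage sketch (added, illustrative only) for rnRowRndmztn
########################################################
#The helper below is not part of the original module; it is a minimal
#sketch of rnRowRndmztn: keep a fixed subset of 25 columns and resample
#20 rows with uniform probabilities from a 100 x 80 toy background, 10
#times. All sizes and the seed (3) are assumptions made for
#illustration. It is defined but never called at import time.
def _sktchRnRowRndmztn():
	"""
	Return one background mean per toy row randomization.
	"""
	aBckgrndMetrcMskd = ma.masked_invalid(random.rand(100,80))
	aPosProbRows = zeros(100)
	aPosProbRows.fill(1/100.0)
	aPosClmns = range(25)#the first 25 columns are kept in every sample
	aBcrkngdStatstc,aBcrkngdMirnStatstc = rnRowRndmztn \
	(aBckgrndMetrcMskd,10,'mean',3,aPosClmns,aPosProbRows, \
	lenTrgtRows=20,vrbse=False)
	return aBcrkngdStatstc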
########################################################
#~ Run randomization on only rows and average by gene
########################################################
def rnRowRndmztnByGns(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aPosClmns=None,aPosProbRows=None,lenTrgtClms=False,lenTrgtRows=False, \
aPosClmnsToGns=None,aPosRowsToGns=None,aBckgrndMirnMetrcMskd=None, \
vrbse=True):
"""
Input: aBckgrndMetrcMskd is a 2x2 masked array with a metric of
interest in each cell and rows and columns in the same order as the
other input files. numRndmztns is the number of randomizations.
statstc is the statistic to calculate from the background samples.
seedAdd is the seed. aPosClmns is an array of positions in columns
to be sampled. aPosProbRows is an array with the probabilities of
	sampling the positions in rows. lenTrgtClms is the size of the
	column samples to take (normally the same as the target).
	lenTrgtRows is the size of the samples to take from the rows
	(normally the same as the target). aPosClmnsToGns is an array of
position of gene to which each column in aBckgrndMetrcMskd and
aPosProbClmns is mapped. aPosRowsToGns is an array of position of
gene to which each row in aBckgrndMetrcMskd and aPosProbRows is
mapped. Optionally, aBckgrndMirnMetrcMskd is a masked array (3D
supermatrix) with an array of metric values for each column and row
to be summarized using the value statstc.
Output: aBcrkngdStatstc is the statistic value for each
	randomization of rows (of size lenTrgtRows) in
	numRndmztns. aBcrkngdMirnStatstc is an array with the summarized
	miRNA metrics; it is None if aBckgrndMirnMetrcMskd is None.
"""
aBcrkngdStatstc = zeros(numRndmztns,dtype=float32)
aBcrkngdStatstc.fill(nan)
lenAPosProbRows = len(aPosProbRows)
	#assuming 0-based gene positions, the per-gene holders need max(...)+1 slots
	lenClmnsToGns = max(aPosClmnsToGns)+1
	lenRowsToGns = max(aPosRowsToGns)+1
#vectorize the function
rtrnRndmBckgrndMetrcPrGn =vectorize(rtrnAvrgMskdArray,excluded={0})
if aPosClmns is None:
aPosClmns = xrange(aBckgrndMetrcMskd.shape[1])
try:
raise exceptions.CelleryWarningObjct \
('Rows are going to be randomized but no positions were',
'provided for columns, all of them are being included.')
except exceptions.CelleryWarningObjct as err:
print err
pass
#to randomize miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
nRows,nClmns,numMrns = aBckgrndMirnMetrcMskd.shape
assert nRows==lenAPosProbRows
aBcrkngdMirnStatstc = zeros((numRndmztns,numMrns),dtype=float32)
aBcrkngdMirnStatstc.fill(nan)
rtrnRndmBckgrndMirnMetrcPrGn = \
vectorize(rtrnStstMskdMirnArray,excluded={0})
else:
aBcrkngdMirnStatstc = None
#----------------------------
#Run randomization
for smpl in xrange(numRndmztns):
if vrbse and smpl%500 == 0:
print '\t...Running randomization %s out of %s'%(smpl, \
numRndmztns)
#
		#per-gene holders of sampled positions (lists, so .append works;
		#assumes rtrnAvrgMskdArray accepts a list of positions per gene)
		aPosClmsRndmGns = [[] for _ in xrange(lenClmnsToGns)]
		aPosRowsRndmGns = [[] for _ in xrange(lenRowsToGns)]
#~
seed('%s'%(smpl*seedAdd))#set a seed to make it
#replicable
aPosRows = xrange(lenAPosProbRows)
aPosRowsRndm = random.choice(aPosRows,lenTrgtRows, \
p=aPosProbRows)
for rltvPos,absPos in enumerate(aPosRowsRndm):
aPosRowsRndmGns[aPosRowsToGns[absPos]].append(rltvPos)
for rltvPos,absPos in enumerate(aPosClmns):
aPosClmsRndmGns[aPosClmnsToGns[absPos]].append(rltvPos)
#
aRndmBckgrndMetrc = rtrnRndmBckgrndMetrcPrGn(aBckgrndMetrcMskd, \
aPosRowsRndmGns,aPosClmsRndmGns)
aRndmBckgrndMetrc = ma.masked_invalid(aRndmBckgrndMetrc)
aBcrkngdStatstc[smpl] = getattr(ma,statstc)(aRndmBckgrndMetrc)
#----------------------------
#Run randomization miRNA metrics
if aBckgrndMirnMetrcMskd is not None:
aRndmBckgrndMirnMtrc = rtrnRndmBckgrndMirnMetrcPrGn \
(aBckgrndMirnMetrcMskd,aPosRowsRndmGns,aPosClmsRndmGns, \
numMrns)
aRndmBckgrndMirnMtrc=ma.masked_invalid(aRndmBckgrndMirnMtrc)
aBcrkngdMirnStatstc[smpl]=rtrnMirnStat(aRndmBckgrndMirnMtrc)
return aBcrkngdStatstc,aBcrkngdMirnStatstc
########################################################
#~ Core method to distribute and run different randomizations
########################################################
def rndmzCore(aBckgrndMetrcMskd,mthdBckgrndRndmztn,statstcTrgt,stdTrgt, \
outPltFl = False,aBckgrndPosProbClmnsORaPosClmns = None, \
aBckgrndPosProbRowsORaPosRows = None, lenTrgtClms = False, \
lenTrgtRows = False, seedAdd = False, numRndmztns = 1000, \
maxRndmztns = 25, statstc = 'mean', fnctn = 'sf',vrbse = True, \
aPosClmnsToGns = None, aPosRowsToGns = None, aBckgrndMirnMetrcMskd \
= None, aTrgtMirnStatstcMskd = None, mirnDtype = 'cnt', \
outMirnStstFl = None, aMirnNms = None):
"""
Input: aBckgrndMetrcMskd is masked array with background metric
values to be randomly sampled. mthdBckgrndRndmztn is the method to
make the randomization: {rnClmNRowRndmztn, rnRowRndmztn, or
rnClmRndmztn}. statstcTrgt is the statistic value of the target
whose probability is going to be calculated from the randomized
background using a z-score approach. stdTrgt is the standard
deviation of the target data. Optionally, outPltFl is a file to plot
the randomization and significance of target statistic.
aBckgrndPosProbClmnsORaPosClmns is the position of columns of
interest OR position probability of all columns in the background.
aBckgrndPosProbRowsORaPosRows is the position of rows of interest OR
position probability of all rows in the background. lenTrgtClms is
the number of columns to be sampled. lenTrgtRows is the number of
rows to be sampled. seedAdd is the seed to run the randomizations.
numRndmztns is the number of randomizations to run in the
background. maxRndmztns is the maximum number of iterations to
enforce normality. statstc is the statistic to sample from each
randomization to build the normal distribution to test the
significance of the target statistic. fnctn is the function to
compare the target statistic to the resulting normal distribution
from the randomized background (sf == survival function / greater
than or equal to). If vrbse all log messages are going to be printed.
aPosClmnsToGns is an array of position of gene to which each column
in aBckgrndMetrcMskd and aPosProbClmns is mapped. aPosRowsToGns is
an array of position of gene to which each row in aBckgrndMetrcMskd
and aPosProbRows is mapped. If aPosRowsToGns and aPosClmnsToGns are
not None calculations are going to be run by gene.
aBckgrndMirnMetrcMskd is a masked array (3D supermatrix) with an
array of metric values for each column and row to be summarized
using the value statstc. aBcrkngdMirnStatstc is an array with the
miRNA metrics summarized for the background. aTrgtMirnStatstcMskd is
an array with the miRNA metrics for the target dataset. mirnDtype is
the datatype of the miRNA metric: {'cnt' for counts and 'scr' for
scores}. outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names. fnctn
is the statistical function to calculate the probability {'sf' is
greater than or equal to}.
Output: p_val is the one-side probability (following fnctn input)
that the target statistic belongs to the normal distributed
statistic obtained from the randomized background, with value
zscore. meanBckgrnd is the mean metric of the normal distributed
statistic obtained from the randomized background and stdBckgrnd is
its standard deviation.
"""
#----------------------------
#Set required parameteres
logRun = []
if not seedAdd:
seedAdd = random.random()
try:
raise exceptions.CelleryWarningObjct \
('Seed for randomizations was set to',seedAdd)
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
logRun.append(mssge)
pass
#----------------------------
#Run randomizations
aBcrkngdStatstc,aBcrkngdMirnStatstc = \
mthdBckgrndRndmztn(aBckgrndMetrcMskd,numRndmztns,statstc,seedAdd, \
aBckgrndPosProbClmnsORaPosClmns,aBckgrndPosProbRowsORaPosRows, \
lenTrgtClms,lenTrgtRows,aPosClmnsToGns,aPosRowsToGns, \
aBckgrndMirnMetrcMskd,vrbse)
#----------------------------
#Test normality in the background randomization
k2,pval = mstats.normaltest(aBcrkngdStatstc)
if pval <= 0.05:
if maxRndmztns==0:
try:
raise exceptions.CelleryWarningObjct \
('Enforce background normality will stop.', \
'The maximum number of trials was exceeded.')
except exceptions.CelleryWarningObjct as err:
print err
else:
maxRndmztns-=1
try:
raise exceptions.CelleryWarningObjct \
('Re-running randomizations to enforce background', \
'normality.')
except exceptions.CelleryWarningObjct as mssge:
if vrbse:
print mssge
logRun.append(mssge)
pass
seedAdd += numRndmztns
p_val,zscore,meanBckgrnd,stdBckgrnd,logRun = rndmzCore \
(aBckgrndMetrcMskd,mthdBckgrndRndmztn,statstcTrgt,stdTrgt, \
outPltFl,aBckgrndPosProbClmnsORaPosClmns, \
aBckgrndPosProbRowsORaPosRows,lenTrgtClms,lenTrgtRows, \
seedAdd,numRndmztns,maxRndmztns,statstc,fnctn,vrbse, \
aPosClmnsToGns,aPosRowsToGns,aBckgrndMirnMetrcMskd, \
aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl,aMirnNms)
return p_val,zscore,meanBckgrnd,stdBckgrnd,logRun
else:
mssge = '\n'.join([
'\t-----------------------------------------------------------',
'\t The normality of the background distribution to test the',
'\t mean score significance had a k2: %s '%k2,
'\t and p-value: %s'%pval,
'\t-----------------------------------------------------------'])
if vrbse:
print mssge
logRun.append(mssge)
#----------------------------
#Calculate significance
zscore = mstats.zmap(statstcTrgt,aBcrkngdStatstc)
p_val = getattr(norm,fnctn)(zscore)#equal or greater than if 'sf'
meanBckgrnd = np.mean(aBcrkngdStatstc)
stdBckgrnd = np.std(aBcrkngdStatstc)
#----------------------------
#Make plots
if outPltFl:
mkPlt(statstcTrgt,aBcrkngdStatstc,outPltFl)
#----------------------------
#Process miRNA results
if aBcrkngdMirnStatstc is not None:
mssge = procMirnRndmztn(aBcrkngdMirnStatstc, \
aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl,aMirnNms)
if vrbse:
print mssge
logRun.append(mssge)
return p_val,zscore,meanBckgrnd,stdBckgrnd,logRun
########################################################
#~ Function to be vectorized to calculate average values for matrix
#(i.e. per gene) given a supermatrix and a set of positions
########################################################
def rtrnAvrgMskdArray(aBckgrndMetrcMskd,aRowsPosGns,aClmnsPosGns):
"""
Input: aBckgrndMetrcMskd is a masked array (the supermatrix) with
background metric values to be averaged following aRowsPosGns and
aClmnsPosGns. aRowsPosGns is an array with the position of rows in
the supermatrix (aBckgrndMetrcMskd). aClmnsPosGns is an array with
the positions of columns in the supermatrix.
Output: avrgMskdVal is the average of the masked values at the
positions given by aRowsPosGns and aClmnsPosGns in the supermatrix
(aBckgrndMetrcMskd).
"""
if len(aRowsPosGns) and len(aClmnsPosGns):
mskdECPsPerGn = ma.mean(aBckgrndMetrcMskd[aRowsPosGns,:] \
[:,aClmnsPosGns]).__float__()
avrgMskdVal = float32(mskdECPsPerGn)
return avrgMskdVal
else:
return nan
########################################################
#~ Calculate summary statistic for a miRNA-metric matrix.
########################################################
def rtrnMirnStat(aMirnMetrcMskd,mirnStatstc='sum'):
"""
Input: aMirnMetrcMskd is a masked array (3D supermatrix) with an
array of metric values for each column and row to be summarized
using the value statstc. Optionally, mirnStatstc is the statistic used
to summarize the array of metric values.
Output: summrzdMirnMetrcMskd is an array of the size of the input
array of metric values with the values summarized for all columns-
rows.
NOTE: the mask in aMirnMetrcMskd is a mask on column-rows of size
rows x columns. Masked positions are going to be excluded from the
final summary.
NOTE: Invalid positions are going to return nan values.
"""
summrzdMirnMetrcMskd = getattr(ma,mirnStatstc) \
(ma.vstack(aMirnMetrcMskd),axis=0,dtype=float32)
summrzdMirnMetrcMskd.fill_value = nan
#return the filled array so invalid positions come back as nan
return summrzdMirnMetrcMskd.filled()
########################################################
#~ Function to be vectorized to calculate an statistic of interest for
#RNA metrics given a supermatrix and a set of positions
########################################################
def rtrnStstMskdMirnArray(aBckgrndMirnMetrcMskd,aRowsPosGns,aClmnsPosGns, \
lenMirnMtrc):
"""
Input: aBckgrndMirnMetrcMskd is a masked array (the supermatrix) with
stats for the metrics of shared miRNAs for each masked row-column
pair to be averaged following aRowsPosGns and aClmnsPosGns.
aRowsPosGns is an array with the position of rows in the supermatrix
(aBckgrndMirnMetrcMskd). aClmnsPosGns is an array with the positions of
columns in the supermatrix. lenMirnMtrc is the length of the miRNA
metrics array.
Output: ststcMirnMetrcMskdVal is an array with the statistic of
interest calculated for aBckgrndMirnMetrcMskd following aRowsPosGns and
aClmnsPosGns.
NOTE: Invalid miRNA metric values are going to return nan for all
positions in lenMirnMtrc.
"""
if len(aRowsPosGns) and len(aClmnsPosGns):
ststcMirnMetrcMskdVal = ma.mean(aBckgrndMirnMetrcMskd[aRowsPosGns,:] \
[:,aClmnsPosGns],axis=0,dtype=np.float32)
ststcMirnMetrcMskdVal.fill_value = nan
return ststcMirnMetrcMskdVal.filled()
else:
return array([nan for x in xrange(lenMirnMtrc)])
########################################################
#~ Calculate input length probabilities given an input model and params.
########################################################
def rtrndStrtEndCnt(lDtLens,mdlWprms,intrvlLgth,intvlJmp):
"""
Input: lDtLens is a list of gene/lncrna lengths, with the position
given by the index in the list. mdlWprms is the model and parameters
for the distribution of lengths in lDtLens. intrvlLgth is the length
of the intervals (in nt) to sample. intvlJmp is the size (in nt) by
which intrvlLgth is increased in case of error (i.e. p == inf).
Output: aDtPosProb is an array of gene/lncrna sampling probabilities
in the same order as the input lDtLens.
"""
#----------------------------
#Index lengths
gnPos = -1
dDtLenlPos = {}
for gnLen in lDtLens:
gnPos+=1
if gnLen in dDtLenlPos:
dDtLenlPos[gnLen].append(gnPos)
else:
dDtLenlPos[gnLen]=[gnPos]
#----------------------------
#Calculate probabilities of intervals (uniform within them)
srtdlGnLenlPos = sorted(dDtLenlPos.keys())
maxLength = max(srtdlGnLenlPos)
minLength = min(srtdlGnLenlPos)
dStrtlngthEndlngthProb = {}
for strtLgnth in range(minLength,maxLength,intrvlLgth):
endLgnth = strtLgnth + intrvlLgth
if endLgnth>(maxLength):
endLgnth = maxLength
probd, abserr = integrate.quad(mdlWprms.pdf,strtLgnth,endLgnth)
if probd == inf:
intrvlLgth += intvlJmp
try:
raise exceptions.CelleryWarningObjct \
('Length interval size is going to be increased by', \
intvlJmp)
except exceptions.CelleryWarningObjct as err:
print err
pass
#increase the size of the jump
return rtrndStrtEndCnt(lDtLens,mdlWprms,intrvlLgth,intvlJmp)
else:
dStrtlngthEndlngthProb[strtLgnth,endLgnth] = float32(probd)
#----------------------------
#Calculate probabilities for input length
aDtPosProb = zeros(len(lDtLens),dtype=float32)#out probabilities
sStrtlngthEndlngth = sorted(dStrtlngthEndlngthProb.keys())
cStrt,cEnd = sStrtlngthEndlngth.pop()
cProb = dStrtlngthEndlngthProb.pop((cStrt,cEnd))
cEnd+=1#set a starter
gnLenStrt = True#set a starter
cPosIntrvlLens = []
while srtdlGnLenlPos:#from top to bottom
if gnLenStrt:
gnLen = srtdlGnLenlPos.pop()
gnLenStrt = False
while cStrt<=gnLen<cEnd:
cPosIntrvlLens.extend(dDtLenlPos[gnLen])
if srtdlGnLenlPos:
gnLen = srtdlGnLenlPos.pop()
else:
gnLen = -inf#set a dummy value to pass the assertion below
assert gnLen<cStrt
if cPosIntrvlLens:
indvldProb = divide(cProb,len(cPosIntrvlLens))
for pos in cPosIntrvlLens:
aDtPosProb[pos] = indvldProb
if gnLen<0:
break
else:
cStrt,cEnd = sStrtlngthEndlngth.pop()
cProb = dStrtlngthEndlngthProb.pop((cStrt,cEnd))
cPosIntrvlLens = []
#----------------------------
#test and correct for probability to sum 1
sumIndvldProb = np.sum(aDtPosProb,dtype=float32)
while sumIndvldProb < float32(1):
try:
raise exceptions.CelleryWarningObjct \
('probabilities were corrected to sum 1.0 from', \
sumIndvldProb)
except exceptions.CelleryWarningObjct as err:
print err
pass
fctr = divide(1,sumIndvldProb,dtype=float32)
aDtPosProb = multiply(aDtPosProb,fctr,dtype=float32)
sumIndvldProb = np.sum(aDtPosProb,dtype=float32)
return aDtPosProb
########################################################
#~ Wrapper for full randomization
########################################################
def wrprFllRndmztn(aBckgrndMetrcMskd,lBckgrndRowLens,lBckgrndClmnLens, \
mdlWprmsRows,mdlWprmsClmns,mskRowClmnDflt=None,intrvlLgth=15, \
intvlJmp=5,aTrgtMetrcMskd=None,statstcTrgt=False,stdTrgt=False, \
lenTrgtRows=False,lenTrgtClms=False,outPltFl=False,numRndmztns=1000, \
maxRndmztns=100,seedAdd=False,statstc='mean',vrbse=True, \
aPosClmnsToGns=None,aPosRowsToGns=None,aBckgrndMirnMetrcMskd=None, \
aTrgtMirnStatstcMskd=None,mirnDtype='cnt',outMirnStstFl=None, \
aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndRowLens is a list of lengths
for the rows of aBckgrndMetrcMskd. lBckgrndClmnLens is a list of
lengths for the columns of aBckgrndMetrcMskd. mdlWprmsRows is the
frozen model with parameters to sample lengths in rows
(==lBckgrndRowLens). mdlWprmsClmns is the frozen model with
parameters to sample lengths in columns (==lBckgrndClmnLens).
Optionally, mskRowClmnDflt is an additional mask for
aBckgrndMetrcMskd (i.e. for values that are going to be excluded
from calculations). intrvlLgth is an interval size to bin the
lengths from the samples. intvlJmp is an integer value to increase
the size of the bins in case the probability of one of them is inf.
aTrgtMetrcMskd is a masked array with metric values of interest for
the target. statstcTrgt is the statistic value of the
target whose probability is going to be calculated from the
randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed
to run the randomizations. statstc is the statistic to sample from
each randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If aPosRowsToGns
and aPosClmnsToGns are not None calculations are going to be run by
gene. aBckgrndMirnMetrcMskd is a masked array (3D supermatrix) with
an array of metric values for each column and row to be summarized
using the value statstc. aBcrkngdMirnStatstc is an array with the
miRNA metrics summarized for the background. aTrgtMirnStatstcMskd is
an array with the miRNA metrics for the target dataset. mirnDtype is
the datatype of the miRNA metric: {'cnt' for counts and 'scr' for
scores}. outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Both rows and columns are going to be randomized.
NOTE: lBckgrndRowLens must have the same length as rows in
aBckgrndMetrcMskd.
NOTE: lBckgrndClmnLens must have the same length as columns in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenRows = len(lBckgrndRowLens)
lenClmns = len(lBckgrndClmnLens)
assert (lenRows,lenClmns) == aBckgrndMetrcMskd.shape
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Calculate probability for column and row positions
aBckgrndRowsPosProb = rtrndStrtEndCnt(lBckgrndRowLens,mdlWprmsRows, \
intrvlLgth,intvlJmp)
aBckgrndClmnPosProb = rtrndStrtEndCnt(lBckgrndClmnLens, \
mdlWprmsClmns,intrvlLgth,intvlJmp)
ovrAllogRun,p_val = cmptFullRndmztn(aBckgrndMetrcMskd, \
aBckgrndRowsPosProb,aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for full randomization excluding positions from background
########################################################
def wrprFllRndmztnExcldPos(aBckgrndMetrcMskd,lBckgrndRowLens, \
lBckgrndClmnLens,mdlWprmsRows,mdlWprmsClmns,lExcldBckgrndRowPos=None, \
lExcldBckgrndClmnPos=None,mskRowClmnDflt=None,intrvlLgth=15, \
intvlJmp=5,aTrgtMetrcMskd=None,statstcTrgt=False,stdTrgt=False, \
lenTrgtRows=False,lenTrgtClms=False,outPltFl=False,numRndmztns=1000, \
maxRndmztns=100,seedAdd=False,statstc='mean',vrbse=True, \
aPosClmnsToGns=None,aPosRowsToGns=None,aBckgrndMirnMetrcMskd=None, \
aTrgtMirnStatstcMskd=None,mirnDtype='cnt',outMirnStstFl=None, \
aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndRowLens is a list of lengths
for the rows of aBckgrndMetrcMskd. lBckgrndClmnLens is a list of
lengths for the columns of aBckgrndMetrcMskd. mdlWprmsRows is the
frozen model with parameters to sample lengths in rows
(==lBckgrndRowLens). mdlWprmsClmns is the frozen model with
parameters to sample lengths in columns (==lBckgrndClmnLens).
Optionally, lExcldBckgrndRowPos is the list of row positions to
exclude from the background. lExcldBckgrndClmnPos is the list of
column positions to exclude from the background. mskRowClmnDflt is
an additional mask for aBckgrndMetrcMskd (i.e. for values that are
going to be excluded from calculations). intrvlLgth is an interval
size to bin the lengths from the samples. intvlJmp is an integer
value to increase the size of the bins in case the probability of
one of them is inf. aTrgtMetrcMskd is a masked array with metric
values of interest for the target. statstcTrgt is the statistic
value of the target whose probability is going to be calculated from
the randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed
to run the randomizations. statstc is the statistic to sample from
each randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If
aPosRowsToGns and aPosClmnsToGns are not None calculations are going
to be run by gene. aBckgrndMirnMetrcMskd is a masked array (3D
supermatrix) with an array of metric values for each column and row
to be summarized using the value statstc. aBcrkngdMirnStatstc is an
array with the miRNA metrics summarized for the background.
aTrgtMirnStatstcMskd is an array with the miRNA metrics for the
target dataset. mirnDtype is the datatype of the miRNA metric:
{'cnt' for counts and 'scr' for scores}. outMirnStstFl is the file
to save the results of the miRNA probability calculation. aMirnNms
is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Both rows and columns are going to be randomized.
NOTE: lBckgrndRowLens must have the same length as rows in
aBckgrndMetrcMskd.
NOTE: lBckgrndClmnLens must have the same length as columns in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenRows = len(lBckgrndRowLens)
lenClmns = len(lBckgrndClmnLens)
assert (lenRows,lenClmns) == aBckgrndMetrcMskd.shape
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Exclude positions
if lExcldBckgrndRowPos is not None:
sExcldBckgrndRowPos = set(lExcldBckgrndRowPos)
aBckgrndRowPosSlctd = []
for pos in xrange(lenRows):
if pos in sExcldBckgrndRowPos:
sExcldBckgrndRowPos.remove(pos)
else:
aBckgrndRowPosSlctd.append(pos)
aBckgrndRowPosSlctd = array(aBckgrndRowPosSlctd)
lBckgrndRowLens = array(lBckgrndRowLens) \
[aBckgrndRowPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[aBckgrndRowPosSlctd,:]
if lExcldBckgrndClmnPos is not None:
sExcldBckgrndClmnPos = set(lExcldBckgrndClmnPos)
aBckgrndClmnPosSlctd = []
for pos in xrange(lenClmns):
if pos in sExcldBckgrndClmnPos:
sExcldBckgrndClmnPos.remove(pos)
else:
aBckgrndClmnPosSlctd.append(pos)
aBckgrndClmnPosSlctd = array(aBckgrndClmnPosSlctd)
lBckgrndClmnLens = array(lBckgrndClmnLens) \
[aBckgrndClmnPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[:,aBckgrndClmnPosSlctd]
#----------------------------
#Calculate probability for column and row positions
aBckgrndRowsPosProb = rtrndStrtEndCnt(lBckgrndRowLens,mdlWprmsRows, \
intrvlLgth,intvlJmp)
aBckgrndClmnPosProb = rtrndStrtEndCnt(lBckgrndClmnLens, \
mdlWprmsClmns,intrvlLgth,intvlJmp)
ovrAllogRun,p_val = cmptFullRndmztn(aBckgrndMetrcMskd, \
aBckgrndRowsPosProb,aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for full randomization including positions from background
########################################################
def wrprFllRndmztnIncldPos(aBckgrndMetrcMskd,lBckgrndRowLens, \
lBckgrndClmnLens,mdlWprmsRows,mdlWprmsClmns,lIncldBckgrndRowPos=None, \
lIncldBckgrndClmnPos=None,mskRowClmnDflt=None,intrvlLgth=15, \
intvlJmp=5,aTrgtMetrcMskd=None,statstcTrgt=False,stdTrgt=False, \
lenTrgtRows=False,lenTrgtClms=False,outPltFl=False,numRndmztns=1000, \
maxRndmztns=100,seedAdd=False,statstc='mean',vrbse=True, \
aPosClmnsToGns=None,aPosRowsToGns=None,aBckgrndMirnMetrcMskd=None, \
aTrgtMirnStatstcMskd=None,mirnDtype='cnt',outMirnStstFl=None, \
aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndRowLens is a list of lengths
for the rows of aBckgrndMetrcMskd. lBckgrndClmnLens is a list of
lengths for the columns of aBckgrndMetrcMskd. mdlWprmsRows is the
frozen model with parameters to sample lengths in rows
(==lBckgrndRowLens). mdlWprmsClmns is the frozen model with
parameters to sample lengths in columns (==lBckgrndClmnLens).
Optionally, lIncldBckgrndRowPos is the list of row positions to
include from the background. lIncldBckgrndClmnPos is the list of
column positions to include from the background. mskRowClmnDflt is
an additional mask for aBckgrndMetrcMskd (i.e. for values that are
going to be included in calculations). intrvlLgth is an interval
size to bin the lengths from the samples. intvlJmp is an integer
value to increase the size of the bins in case the probability of
one of them is inf. aTrgtMetrcMskd is a masked array with metric
values of interest for the target. statstcTrgt is the statistic
value of the target whose probability is going to be calculated from
the randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed
to run the randomizations. statstc is the statistic to sample from
each randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If
aPosRowsToGns and aPosClmnsToGns are not None calculations are going
to be run by gene. aBckgrndMirnMetrcMskd is a masked array (3D
supermatrix) with an array of metric values for each column and row
to be summarized using the value statstc. aBcrkngdMirnStatstc is an
array with the miRNA metrics summarized for the background.
aTrgtMirnStatstcMskd is an array with the miRNA metrics for the
target dataset. mirnDtype is the datatype of the miRNA metric:
{'cnt' for counts and 'scr' for scores}. outMirnStstFl is the file
to save the results of the miRNA probability calculation. aMirnNms
is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Both rows and columns are going to be randomized.
NOTE: lBckgrndRowLens must have the same length as rows in
aBckgrndMetrcMskd.
NOTE: lBckgrndClmnLens must have the same length as columns in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenRows = len(lBckgrndRowLens)
lenClmns = len(lBckgrndClmnLens)
assert (lenRows,lenClmns) == aBckgrndMetrcMskd.shape
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Include positions
if lIncldBckgrndRowPos is not None:
aBckgrndRowPosSlctd = array(lIncldBckgrndRowPos)
lBckgrndRowLens = array(lBckgrndRowLens) \
[aBckgrndRowPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[aBckgrndRowPosSlctd,:]
if lIncldBckgrndClmnPos is not None:
aBckgrndClmnPosSlctd = array(lIncldBckgrndClmnPos)
lBckgrndClmnLens = array(lBckgrndClmnLens) \
[aBckgrndClmnPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[:,aBckgrndClmnPosSlctd]
#----------------------------
#Calculate probability for column and row positions
aBckgrndRowsPosProb = rtrndStrtEndCnt(lBckgrndRowLens,mdlWprmsRows, \
intrvlLgth,intvlJmp)
aBckgrndClmnPosProb = rtrndStrtEndCnt(lBckgrndClmnLens, \
mdlWprmsClmns,intrvlLgth,intvlJmp)
ovrAllogRun,p_val = cmptFullRndmztn(aBckgrndMetrcMskd, \
aBckgrndRowsPosProb,aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for randomization on columns
########################################################
def wrprClmRndmztn(aBckgrndMetrcMskd,lBckgrndClmnLens,mdlWprmsClmns, \
mskRowClmnDflt=None,intrvlLgth=15,intvlJmp=5,aTrgtMetrcMskd=None, \
statstcTrgt=False,stdTrgt=False,lenTrgtClms=False, \
outPltFl=False,numRndmztns=1000,maxRndmztns=100,seedAdd=False, \
statstc='mean',vrbse=True,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None,mirnDtype='cnt', \
outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndClmnLens is a list of lengths
for the columns of aBckgrndMetrcMskd. mdlWprmsClmns is the frozen model
with parameters to sample lengths in columns (==lBckgrndClmnLens).
Optionally, mskRowClmnDflt is an additional mask for aBckgrndMetrcMskd
(i.e. for values that are going to be excluded from calculations).
intrvlLgth is an interval size to bin the lengths from the samples.
intvlJmp is an integer value to increase the size of the bins in
case the probability of one of them is inf. aTrgtMetrcMskd is a
masked array with metric values of interest for the target.
statstcTrgt is the statistic value of the target whose probability
is going to be calculated from the randomized background using a
z-score approach. stdTrgt is the standard deviation of the target
data. lenTrgtClms is the number of columns to be sampled. outPltFl
is a file to plot the randomization and significance of target
statistic. numRndmztns is the number of randomizations to run in the
background. maxRndmztns is the maximum number of iterations to
enforce normality. seedAdd is the seed to run the randomizations.
statstc is the statistic to sample from each randomization to build
the normal distribution to test the significance of the target
statistic. If vrbse all log messages are going to be printed.
aPosClmnsToGns is an array of position of gene to which each column
in aBckgrndMetrcMskd and aPosProbClmns is mapped. aPosRowsToGns is
an array of position of gene to which each row in aBckgrndMetrcMskd
and aPosProbRows is mapped. If aPosRowsToGns and aPosClmnsToGns are
not None calculations are going to be run by gene.
aBckgrndMirnMetrcMskd is an array with the miRNA metrics summarized
for the background. aTrgtMirnStatstcMskd is an array with the miRNA
metrics for the target dataset. mirnDtype is the datatype of the
miRNA metric: {'cnt' for counts and 'scr' for scores}. outMirnStstFl
is the file to save the results of the miRNA probability
calculation. aMirnNms is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Only columns are going to be randomized.
NOTE: lBckgrndClmnLens must have the same length as columns in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenClmns = len(lBckgrndClmnLens)
assert lenClmns == aBckgrndMetrcMskd.shape[1]
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
lenTrgtRows = False
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Calculate probability for column and row positions
aBckgrndClmnPosProb = rtrndStrtEndCnt(lBckgrndClmnLens, \
mdlWprmsClmns,intrvlLgth,intvlJmp)
aBckgrndPosRows = array([pos for pos in xrange(aBckgrndMetrcMskd. \
shape[0])])
ovrAllogRun,p_val = cmptClmRndmztn(aBckgrndMetrcMskd, \
aBckgrndPosRows,aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for randomization on columns excluding positions from
# background
########################################################
def wrprClmRndmztnExcldPos(aBckgrndMetrcMskd,lBckgrndClmnLens, \
mdlWprmsClmns,lExcldBckgrndRowPos=None,lExcldBckgrndClmnPos=None, \
mskRowClmnDflt=None,intrvlLgth=15,intvlJmp=5,aTrgtMetrcMskd=None, \
statstcTrgt=False,stdTrgt=False,lenTrgtClms=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None,mirnDtype='cnt', \
outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndClmnLens is a list of lengths
for the columns of aBckgrndMetrcMskd. mdlWprmsClmns is the frozen model
with parameters to sample lengths in columns (==lBckgrndClmnLens).
Optionally, lExcldBckgrndRowPos is the list of row positions to
exclude from the background. lExcldBckgrndClmnPos is the list of
column positions to exclude from the background. mskRowClmnDflt is
an additional mask for aBckgrndMetrcMskd (i.e. for values that are
going to be excluded from calculations). intrvlLgth is an interval
size to bin the lengths from the samples. intvlJmp is an integer
value to increase the size of the bins in case the probability of
one of them is inf. aTrgtMetrcMskd is a masked array with metric
values of interest for the target. statstcTrgt is the statistic
value of the target whose probability is going to be calculated from
the randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtClms is the number of
columns to be sampled. outPltFl is a file to plot the randomization
and significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed to
run the randomizations. statstc is the statistic to sample from each
randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If aPosRowsToGns
and aPosClmnsToGns are not None calculations are going to be run by
gene. aBckgrndMirnMetrcMskd is an array with the miRNA metrics
summarized for the background. aTrgtMirnStatstcMskd is an array with
the miRNA metrics for the target dataset. mirnDtype is the datatype
of the miRNA metric: {'cnt' for counts and 'scr' for scores}.
outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Only columns are going to be randomized.
NOTE: lBckgrndClmnLens must have the same length as columns in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenClmns = len(lBckgrndClmnLens)
assert lenClmns == aBckgrndMetrcMskd.shape[1]
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
lenTrgtRows = False
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Exclude positions
if lExcldBckgrndRowPos is not None:
sExcldBckgrndRowPos = set(lExcldBckgrndRowPos)
aBckgrndRowPosSlctd = []
for pos in xrange(aBckgrndMetrcMskd.shape[0]):#number of background rows
if pos in sExcldBckgrndRowPos:
sExcldBckgrndRowPos.remove(pos)
else:
aBckgrndRowPosSlctd.append(pos)
aBckgrndRowPosSlctd = array(aBckgrndRowPosSlctd)
aBckgrndMetrcMskd = aBckgrndMetrcMskd[aBckgrndRowPosSlctd,:]
if lExcldBckgrndClmnPos is not None:
sExcldBckgrndClmnPos = set(lExcldBckgrndClmnPos)
aBckgrndClmnPosSlctd = []
for pos in xrange(lenClmns):
if pos in sExcldBckgrndClmnPos:
sExcldBckgrndClmnPos.remove(pos)
else:
aBckgrndClmnPosSlctd.append(pos)
aBckgrndClmnPosSlctd = array(aBckgrndClmnPosSlctd)
lBckgrndClmnLens = array(lBckgrndClmnLens) \
[aBckgrndClmnPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[:,aBckgrndClmnPosSlctd]
#----------------------------
#Calculate probability for column and row positions
aBckgrndClmnPosProb = rtrndStrtEndCnt(lBckgrndClmnLens, \
mdlWprmsClmns,intrvlLgth,intvlJmp)
aBckgrndPosRows = array([pos for pos in xrange(aBckgrndMetrcMskd. \
shape[0])])
ovrAllogRun,p_val = cmptClmRndmztn(aBckgrndMetrcMskd, \
aBckgrndPosRows,aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for randomization on columns including positions from
# background
########################################################
def wrprClmRndmztnIncldPos(aBckgrndMetrcMskd,lBckgrndClmnLens, \
mdlWprmsClmns,lIncldBckgrndRowPos=None,lIncldBckgrndClmnPos=None, \
mskRowClmnDflt=None,intrvlLgth=15,intvlJmp=5,aTrgtMetrcMskd=None, \
statstcTrgt=False,stdTrgt=False,lenTrgtClms=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None, \
mirnDtype='cnt',outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndClmnLens is a list of lengths
for the columns of aBckgrndMetrcMskd. mdlWprmsClmns is the frozen model
with parameters to sample lengths in columns (==lBckgrndClmnLens).
Optionally, lIncldBckgrndRowPos is the list of row positions to
include from the background. lIncldBckgrndClmnPos is the list of
column positions to include from the background. mskRowClmnDflt is
an additional mask for aBckgrndMetrcMskd (i.e. for values that are
going to be included in calculations). intrvlLgth is an interval
size to bin the lengths from the samples. intvlJmp is an integer
value to increase the size of the bins in case the probability of
one of them is inf. aTrgtMetrcMskd is a masked array with metric
values of interest for the target. statstcTrgt is the statistic
value of the target whose probability is going to be calculated from
the randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtClms is the number of
columns to be sampled. outPltFl is a file to plot the randomization
and significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed to
run the randomizations. statstc is the statistic to sample from each
randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If aPosRowsToGns
and aPosClmnsToGns are not None calculations are going to be run by
gene. aBckgrndMirnMetrcMskd is an array with the miRNA metrics
summarized for the background. aTrgtMirnStatstcMskd is an array with
the miRNA metrics for the target dataset. mirnDtype is the datatype
of the miRNA metric: {'cnt' for counts and 'scr' for scores}.
outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Only columns are going to be randomized.
NOTE: lBckgrndClmnLens must have the same length as columns in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenClmns = len(lBckgrndClmnLens)
assert lenClmns == aBckgrndMetrcMskd.shape[1]
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
if aPosClmnsToGns is not None and aPosRowsToGns is not None:
assert aBckgrndMetrcMskd.shape == (len(aPosRowsToGns),len(aPosClmnsToGns))
#----------------------------
#Define variables
lenTrgtRows = False
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Include positions
if lIncldBckgrndRowPos is not None:
aBckgrndRowPosSlctd = array(lIncldBckgrndRowPos)
aBckgrndMetrcMskd = aBckgrndMetrcMskd[aBckgrndRowPosSlctd,:]
if lIncldBckgrndClmnPos is not None:
aBckgrndClmnPosSlctd = array(lIncldBckgrndClmnPos)
lBckgrndClmnLens = array(lBckgrndClmnLens) \
[aBckgrndClmnPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[:,aBckgrndClmnPosSlctd]
#----------------------------
#Calculate probability for column and row positions
aBckgrndClmnPosProb = rtrndStrtEndCnt(lBckgrndClmnLens, \
mdlWprmsClmns,intrvlLgth,intvlJmp)
aBckgrndPosRows = array([pos for pos in xrange(aBckgrndMetrcMskd. \
shape[0])])
ovrAllogRun,p_val = cmptClmRndmztn(aBckgrndMetrcMskd, \
aBckgrndPosRows,aBckgrndClmnPosProb,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for randomization on rows
########################################################
def wrprRowRndmztn(aBckgrndMetrcMskd,lBckgrndRowLens,mdlWprmsRows, \
mskRowClmnDflt=None,intrvlLgth=15,intvlJmp=5,aTrgtMetrcMskd=None, \
statstcTrgt=False,stdTrgt=False,lenTrgtRows=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None, \
mirnDtype='cnt',outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndRowLens is a list of lengths
for the rows of aBckgrndMetrcMskd. mdlWprmsRows is the frozen model
with parameters to sample lengths in rows (==lBckgrndRowLens).
Optionally, mskRowClmnDflt is an additional mask for
aBckgrndMetrcMskd (i.e. for values that are going to be excluded
from calculations). intrvlLgth is an interval size to bin the
lengths from the samples. intvlJmp is an integer value to increase
the size of the bins in case the probability of one of them is inf.
aTrgtMetrcMskd is a masked array with metric values of interest for
the target. statstcTrgt is the statistic value of the target whose
probability is going to be calculated from the randomized background
using a z-score approach. stdTrgt is the standard deviation of the
target data. lenTrgtRows is the number of rows to be sampled.
lenTrgtClms is the number of columns to be sampled. outPltFl is a
file to plot the randomization and significance of target statistic.
numRndmztns is the number of randomizations to run in the background.
maxRndmztns is the maximum number of iterations to enforce normality.
seedAdd is the seed to run the randomizations. statstc is the
statistic to sample from each randomization to build the normal
distribution to test the significance of the target statistic. If
vrbse all log messages are going to be printed. aPosClmnsToGns is an
array of position of gene to which each column in aBckgrndMetrcMskd
and aPosProbClmns is mapped. aPosRowsToGns is an array of position
of gene to which each row in aBckgrndMetrcMskd and aPosProbRows is
mapped. If aPosRowsToGns and aPosClmnsToGns are not None calculations
are going to be run by gene. aBckgrndMirnMetrcMskd is an array with
the miRNA metrics summarized for the background.
aTrgtMirnStatstcMskd is an array with the miRNA metrics for the
target dataset. mirnDtype is the datatype of the miRNA metric:
{'cnt' for counts and 'scr' for scores}. outMirnStstFl is the file
to save the results of the miRNA probability calculation. aMirnNms
is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Only rows are going to be randomized.
NOTE: lBckgrndRowLens must have the same length as rows in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenRows = len(lBckgrndRowLens)
assert lenRows == aBckgrndMetrcMskd.shape[0]
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
lenTrgtClms = False
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Calculate probability for column and row positions
aBckgrndRowsPosProb = rtrndStrtEndCnt(lBckgrndRowLens,mdlWprmsRows, \
intrvlLgth,intvlJmp)
aBckgrndPosClmns = array([pos for pos in xrange(aBckgrndMetrcMskd. \
shape[1])])
ovrAllogRun,p_val = cmptRowRndmztn(aBckgrndMetrcMskd, \
aBckgrndRowsPosProb,aBckgrndPosClmns,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for randomization on rows excluding positions from background
########################################################
def wrprRowRndmztnExcldPos(aBckgrndMetrcMskd,lBckgrndRowLens, \
mdlWprmsRows,lExcldBckgrndRowPos=None,lExcldBckgrndClmnPos=None, \
mskRowClmnDflt=None,intrvlLgth=15,intvlJmp=5,aTrgtMetrcMskd=None, \
statstcTrgt=False,stdTrgt=False,lenTrgtRows=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None, \
mirnDtype='cnt',outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndRowLens is a list of lengths
for the rows of aBckgrndMetrcMskd. mdlWprmsRows is the frozen model
with parameters to sample lengths in rows (==lBckgrndRowLens).
lExcldBckgrndRowPos is the list of row positions to exclude from the
background. lExcldBckgrndClmnPos is the list of column positions to
exclude from the background. Optionally, mskRowClmnDflt is an
additional mask for aBckgrndMetrcMskd (i.e. for values that are going
to be excluded from calculations). intrvlLgth is an interval size to
bin the lengths from the samples. intvlJmp is an integer value to
increase the size of the bins in case the probability of one of them
is inf. aTrgtMetrcMskd is a masked array with metric values of
interest for the target. statstcTrgt is the statistic value of the
target whose probability is going to be calculated from the
randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed to
run the randomizations. statstc is the statistic to sample from each
randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If
aPosRowsToGns and aPosClmnsToGns are not None calculations are going
to be run by gene. aBckgrndMirnMetrcMskd is an array with the miRNA
metrics summarized for the background. aTrgtMirnStatstcMskd is an
array with the miRNA metrics for the target dataset. mirnDtype is
the datatype of the miRNA metric: {'cnt' for counts and 'scr' for
scores}. outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Only rows are going to be randomized.
NOTE: lBckgrndRowLens must have the same length as rows in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenRows = len(lBckgrndRowLens)
assert lenRows == aBckgrndMetrcMskd.shape[0]
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
#----------------------------
#Define variables
lenTrgtClms = False
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Exclude positions
if lExcldBckgrndRowPos is not None:
sExcldBckgrndRowPos = set(lExcldBckgrndRowPos)
aBckgrndRowPosSlctd = []
for pos in xrange(lenRows):
if pos in sExcldBckgrndRowPos:
sExcldBckgrndRowPos.remove(pos)
else:
aBckgrndRowPosSlctd.append(pos)
aBckgrndRowPosSlctd = array(aBckgrndRowPosSlctd)
lBckgrndRowLens = array(lBckgrndRowLens) \
[aBckgrndRowPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[aBckgrndRowPosSlctd,:]
if lExcldBckgrndClmnPos is not None:
sExcldBckgrndClmnPos = set(lExcldBckgrndClmnPos)
aBckgrndClmnPosSlctd = []
for pos in xrange(aBckgrndMetrcMskd.shape[1]):#number of background columns
if pos in sExcldBckgrndClmnPos:
sExcldBckgrndClmnPos.remove(pos)
else:
aBckgrndClmnPosSlctd.append(pos)
aBckgrndClmnPosSlctd = array(aBckgrndClmnPosSlctd)
aBckgrndMetrcMskd = aBckgrndMetrcMskd[:,aBckgrndClmnPosSlctd]
#----------------------------
#Calculate probability for column and row positions
aBckgrndRowsPosProb = rtrndStrtEndCnt(lBckgrndRowLens,mdlWprmsRows, \
intrvlLgth,intvlJmp)
aBckgrndPosClmns = array([pos for pos in xrange(aBckgrndMetrcMskd. \
shape[1])])
ovrAllogRun,p_val = cmptRowRndmztn(aBckgrndMetrcMskd, \
aBckgrndRowsPosProb,aBckgrndPosClmns,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
########################################################
#~ Wrapper for randomization on rows including positions from background
########################################################
def wrprRowRndmztnIncldPos(aBckgrndMetrcMskd,lBckgrndRowLens, \
mdlWprmsRows,lIncldBckgrndRowPos=None,lIncldBckgrndClmnPos=None, \
mskRowClmnDflt=None,intrvlLgth=15,intvlJmp=5,aTrgtMetrcMskd=None, \
statstcTrgt=False,stdTrgt=False,lenTrgtRows=False,outPltFl=False, \
numRndmztns=1000,maxRndmztns=100,seedAdd=False,statstc='mean', \
vrbse=True,aPosClmnsToGns=None,aPosRowsToGns=None, \
aBckgrndMirnMetrcMskd=None,aTrgtMirnStatstcMskd=None, \
mirnDtype='cnt',outMirnStstFl=None,aMirnNms=None):
"""
Input: aBckgrndMetrcMskd is a masked array with background metric
values to be randomly sampled. lBckgrndRowLens is a list of lengths
for the rows of aBckgrndMetrcMskd. mdlWprmsRows is the frozen model
with parameters to sample lengths in rows (==lBckgrndRowLens).
lIncldBckgrndRowPos is the list of row positions to include from the
background. lIncldBckgrndClmnPos is the list of column positions to
include from the background. Optionally, mskRowClmnDflt is an
additional mask for aBckgrndMetrcMskd (i.e. for values that are going
to be included in calculations). intrvlLgth is an interval size to
bin the lengths from the samples. intvlJmp is an integer value to
increase the size of the bins in case the probability of one of them
is inf. aTrgtMetrcMskd is a masked array with metric values of
interest for the target. statstcTrgt is the statistic value of the
target whose probability is going to be calculated from the
randomized background using a z-score approach. stdTrgt is the
standard deviation of the target data. lenTrgtRows is the number of
rows to be sampled. lenTrgtClms is the number of columns to be
sampled. outPltFl is a file to plot the randomization and
significance of target statistic. numRndmztns is the number of
randomizations to run in the background. maxRndmztns is the maximum
number of iterations to enforce normality. seedAdd is the seed to
run the randomizations. statstc is the statistic to sample from each
randomization to build the normal distribution to test the
significance of the target statistic. If vrbse all log messages are
going to be printed. aPosClmnsToGns is an array of position of gene
to which each column in aBckgrndMetrcMskd and aPosProbClmns is
mapped. aPosRowsToGns is an array of position of gene to which each
row in aBckgrndMetrcMskd and aPosProbRows is mapped. If aPosRowsToGns
and aPosClmnsToGns are not None calculations are going to be run by
gene. aBckgrndMirnMetrcMskd is an array with the miRNA metrics
summarized for the background. aTrgtMirnStatstcMskd is an array
with the miRNA metrics for the target dataset. mirnDtype is the
datatype of the miRNA metric: {'cnt' for counts and 'scr' for
scores}. outMirnStstFl is the file to save the results of the miRNA
probability calculation. aMirnNms is an array of miRNA names.
Output: ovrAllogRun is the log message of the randomization runs.
p_val is the one-side probability (following fnctn input) that the
target statistic belongs to the normal distributed statistic
obtained from the randomized background.
NOTE: Only rows are going to be randomized.
NOTE: lBckgrndRowLens must have the same length as rows in
aBckgrndMetrcMskd.
NOTE: mskRowClmnDflt must have the same size as aBckgrndMetrcMskd.
"""
#----------------------------
#Test for inputs
lenRows = len(lBckgrndRowLens)
assert lenRows == aBckgrndMetrcMskd.shape[0]
if mskRowClmnDflt is not None:
assert aBckgrndMetrcMskd.shape == mskRowClmnDflt.shape
if aPosClmnsToGns is not None and aPosRowsToGns is not None:
assert aBckgrndMetrcMskd.shape == (len(aPosRowsToGns),len(aPosClmnsToGns))
#----------------------------
#Define variables
lenTrgtClms = False
if mskRowClmnDflt is not None:
mskRowClmn = ma.mask_or(mskRowClmnDflt,aBckgrndMetrcMskd.mask)
aBckgrndMetrcMskd.mask = mskRowClmn
#----------------------------
#Include positions
if lIncldBckgrndRowPos is not None:
aBckgrndRowPosSlctd = array(lIncldBckgrndRowPos)
lBckgrndRowLens = array(lBckgrndRowLens) \
[aBckgrndRowPosSlctd].tolist()
aBckgrndMetrcMskd = aBckgrndMetrcMskd[aBckgrndRowPosSlctd,:]
if lIncldBckgrndClmnPos is not None:
aBckgrndClmnPosSlctd = array(lIncldBckgrndClmnPos)
aBckgrndMetrcMskd = aBckgrndMetrcMskd[:,aBckgrndClmnPosSlctd]
#----------------------------
#Calculate probability for column and row positions
aBckgrndRowsPosProb = rtrndStrtEndCnt(lBckgrndRowLens,mdlWprmsRows, \
intrvlLgth,intvlJmp)
aBckgrndPosClmns = array([pos for pos in xrange(aBckgrndMetrcMskd. \
shape[1])])
ovrAllogRun,p_val = cmptRowRndmztn(aBckgrndMetrcMskd, \
aBckgrndRowsPosProb,aBckgrndPosClmns,aTrgtMetrcMskd,statstcTrgt, \
stdTrgt,lenTrgtRows,lenTrgtClms,outPltFl,numRndmztns,maxRndmztns, \
seedAdd,statstc,vrbse,aPosRowsToGns,aPosClmnsToGns, \
aBckgrndMirnMetrcMskd,aTrgtMirnStatstcMskd,mirnDtype,outMirnStstFl, \
aMirnNms)
return ovrAllogRun,p_val
| gpl-3.0 |
aflaxman/scikit-learn | sklearn/tests/test_grid_search.py | 25 | 29670 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import Ridge
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X - self.foo_param
def inverse_transform(self, X):
return X + self.foo_param
predict_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# ChangedBehaviourWarning occurred previously (prior to #9005)
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_no_warnings(search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_no_warnings(search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
    # check that grid search doesn't destroy a pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
    # degenerates to GridSearchCV if n_iter is the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_classes__property():
    # Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
| bsd-3-clause |
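# A minimal usage sketch for the estimator exercised by the tests above. The
# import below follows the tests and targets the deprecated sklearn.grid_search
# module; in modern releases the same class lives in sklearn.model_selection.
import warnings
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    from sklearn.grid_search import GridSearchCV

X_demo, y_demo = make_classification(n_samples=100, n_features=10,
                                     random_state=0)
search = GridSearchCV(LinearSVC(random_state=0), {'C': [0.1, 1.0, 10.0]}, cv=3)
search.fit(X_demo, y_demo)
print(search.best_params_, search.best_score_)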
ktnyt/chainer | examples/vae/train_vae.py | 2 | 5546 | #!/usr/bin/env python
"""Chainer example: train a VAE on MNIST
"""
import argparse
import os
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
import net
def main():
parser = argparse.ArgumentParser(description='Chainer example: VAE')
parser.add_argument('--initmodel', '-m', default='',
help='Initialize the model from given file')
parser.add_argument('--resume', '-r', default='',
help='Resume the optimization from snapshot')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='results',
help='Directory to output the result')
parser.add_argument('--epoch', '-e', default=100, type=int,
help='number of epochs to learn')
    parser.add_argument('--dim-z', '-z', default=20, type=int,
                        help='dimension of encoded vector')
    parser.add_argument('--dim-h', default=500, type=int,
                        help='dimension of hidden layer')
parser.add_argument('--beta', default=1.0, type=float,
help='Regularization coefficient for '
'the second term of ELBO bound')
    parser.add_argument('--k', '-k', default=1, type=int,
                        help='Number of Monte Carlo samples drawn from the '
                             'encoded distribution to estimate the ELBO')
parser.add_argument('--binary', action='store_true',
help='Use binarized MNIST')
parser.add_argument('--batch-size', '-b', type=int, default=100,
help='learning minibatch size')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# dim z: {}'.format(args.dim_z))
print('# Minibatch-size: {}'.format(args.batch_size))
print('# epoch: {}'.format(args.epoch))
print('')
# Prepare VAE model, defined in net.py
encoder = net.make_encoder(784, args.dim_z, args.dim_h)
decoder = net.make_decoder(784, args.dim_z, args.dim_h,
binary_check=args.binary)
prior = net.make_prior(args.dim_z)
avg_elbo_loss = net.AvgELBOLoss(encoder, decoder, prior,
beta=args.beta, k=args.k)
if args.gpu >= 0:
avg_elbo_loss.to_gpu(args.gpu)
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(avg_elbo_loss)
# Initialize
if args.initmodel:
chainer.serializers.load_npz(args.initmodel, avg_elbo_loss)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(withlabel=False)
if args.binary:
# Binarize dataset
train = (train >= 0.5).astype(np.float32)
test = (test >= 0.5).astype(np.float32)
if args.test:
train, _ = chainer.datasets.split_dataset(train, 100)
test, _ = chainer.datasets.split_dataset(test, 100)
train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
test_iter = chainer.iterators.SerialIterator(test, args.batch_size,
repeat=False, shuffle=False)
# Set up an updater. StandardUpdater can explicitly specify a loss function
# used in the training with 'loss_func' option
updater = training.updaters.StandardUpdater(
train_iter, optimizer,
device=args.gpu, loss_func=avg_elbo_loss)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(
test_iter, avg_elbo_loss, device=args.gpu))
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/reconstr', 'main/kl_penalty', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
# Visualize the results
def save_images(x, filename):
import matplotlib.pyplot as plt
fig, ax = plt.subplots(3, 3, figsize=(9, 9), dpi=100)
for ai, xi in zip(ax.flatten(), x):
ai.imshow(xi.reshape(28, 28))
fig.savefig(filename)
avg_elbo_loss.to_cpu()
train_ind = [1, 3, 5, 10, 2, 0, 13, 15, 17]
x = chainer.Variable(np.asarray(train[train_ind]))
with chainer.using_config('train', False), chainer.no_backprop_mode():
x1 = decoder(encoder(x).mean, inference=True).mean
save_images(x.array, os.path.join(args.out, 'train'))
save_images(x1.array, os.path.join(args.out, 'train_reconstructed'))
test_ind = [3, 2, 1, 18, 4, 8, 11, 17, 61]
x = chainer.Variable(np.asarray(test[test_ind]))
with chainer.using_config('train', False), chainer.no_backprop_mode():
x1 = decoder(encoder(x).mean, inference=True).mean
save_images(x.array, os.path.join(args.out, 'test'))
save_images(x1.array, os.path.join(args.out, 'test_reconstructed'))
# draw images from randomly sampled z
z = prior().sample(9)
x = decoder(z, inference=True).mean
save_images(x.array, os.path.join(args.out, 'sampled'))
if __name__ == '__main__':
main()
| mit |
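# The objective optimized above (net.AvgELBOLoss) combines a reconstruction
# term with a beta-weighted KL penalty. As a stand-alone reference, the
# closed-form KL divergence between a diagonal Gaussian and the standard
# normal prior is sketched below in NumPy; this is illustrative only and is
# not the code used by net.py.
import numpy as np

def gaussian_kl_to_standard_normal(mu, log_var):
    """KL(N(mu, exp(log_var)) || N(0, I)), summed over latent dimensions."""
    return 0.5 * np.sum(np.exp(log_var) + mu ** 2 - 1.0 - log_var, axis=-1)

# Example: a batch of two 3-dimensional latent codes with unit variance.
print(gaussian_kl_to_standard_normal(np.array([[0.0, 0.1, -0.2],
                                                [1.0, 0.0, 0.5]]),
                                     np.zeros((2, 3))))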
RobertABT/heightmap | build/matplotlib/lib/mpl_toolkits/axes_grid1/axes_grid.py | 4 | 30165 | import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec, GridSpec
from axes_divider import Size, SubplotDivider, LocatableAxes, Divider
#import numpy as np
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
def _config_axes_deprecated(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = mlines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = mpatches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
def colorbar(self, mappable, **kwargs):
locator=kwargs.pop("locator", None)
if locator is None:
if "ticks" not in kwargs:
kwargs["ticks"] = ticker.MaxNLocator(5)
if locator is not None:
if "ticks" in kwargs:
raise ValueError("Either *locator* or *ticks* need to be given, not both")
else:
kwargs["ticks"] = locator
self.hold(True)
if self.orientation in ["top", "bottom"]:
orientation="horizontal"
else:
orientation="vertical"
cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
self._config_axes()
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
self.locator = cb.cbar_axis.get_major_locator()
return cb
def _config_axes(self):
'''
Make an axes patch and outline.
'''
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
# for axis in ax.axis.values():
# axis.major_ticks.set_visible(False)
# axis.minor_ticks.set_visible(False)
# axis.major_ticklabels.set_visible(False)
# axis.minor_ticklabels.set_visible(False)
# axis.label.set_visible(False)
# axis = ax.axis[self.orientation]
# axis.major_ticks.set_visible(True)
# axis.minor_ticks.set_visible(True)
#axis.major_ticklabels.set_size(int(axis.major_ticklabels.get_size()*.9))
#axis.major_tick_pad = 3
# axis.major_ticklabels.set_visible(b)
# axis.minor_ticklabels.set_visible(b)
# axis.label.set_visible(b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
#axis.major_ticklabels.set_visible(b)
#axis.minor_ticklabels.set_visible(b)
#axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
def __init__(self, *kl, **kwargs):
orientation=kwargs.pop("orientation", None)
if orientation is None:
raise ValueError("orientation must be specified")
self.orientation = orientation
self._default_label_on = True
self.locator = None
super(LocatableAxes, self).__init__(*kl, **kwargs)
def cla(self):
super(LocatableAxes, self).cla()
self._config_axes()
class Grid(object):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that need to be
    displayed with a given aspect ratio. For example, displaying
    images of the same size with some fixed padding between them cannot
    be easily done in matplotlib. AxesGrid is used in such a case.
"""
_defaultLocatableAxesClass = LocatableAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids = None,
direction="row",
axes_pad = 0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
#aspect=True,
label_mode="L",
axes_class=None,
):
"""
Build an :class:`Grid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
add_all True [ True | False ]
share_all False [ True | False ]
share_x True [ True | False ]
share_y True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if (type(axes_class)) == type and \
issubclass(axes_class, self._defaultLocatableAxesClass.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for i in range(self._ncols)]
self.axes_row = [[] for i in range(self._nrows)]
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=False)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=False)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for i in range(self._ncols)]
self._row_refax = [None for i in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = self._refax
sharey = self._refax
else:
if share_x:
sharex = self._column_refax[col]
else:
sharex = None
if share_y:
sharey = self._row_refax[row]
else:
sharey = None
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
if share_all:
if self._refax is None:
self._refax = ax
else:
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all:
fig.add_axes(ax)
self.set_label_mode(label_mode)
def _init_axes_pad(self, axes_pad):
self._axes_pad = axes_pad
self._horiz_pad_size = Size.Fixed(axes_pad)
self._vert_pad_size = Size.Fixed(axes_pad)
def _update_locators(self):
h = []
h_ax_pos = []
h_cb_pos = []
for ax in self._column_refax:
#if h: h.append(Size.Fixed(self._axes_pad))
if h: h.append(self._horiz_pad_size)
h_ax_pos.append(len(h))
sz = Size.Scaled(1)
h.append(sz)
v = []
v_ax_pos = []
v_cb_pos = []
for ax in self._row_refax[::-1]:
#if v: v.append(Size.Fixed(self._axes_pad))
if v: v.append(self._vert_pad_size)
v_ax_pos.append(len(v))
sz = Size.Scaled(1)
v.append(sz)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows -1 - row])
self.axes_all[i].set_axes_locator(locator)
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
def _get_col_row(self, n):
if self._direction == "column":
col, row = divmod(n, self._nrows)
else:
row, col = divmod(n, self._ncols)
return col, row
def __getitem__(self, i):
return self.axes_all[i]
def get_geometry(self):
"""
        get geometry of the grid. Returns a tuple of two integers,
        representing the number of rows and number of columns.
"""
return self._nrows, self._ncols
def set_axes_pad(self, axes_pad):
"set axes_pad"
self._axes_pad = axes_pad
self._horiz_pad_size.fixed_size = axes_pad
self._vert_pad_size.fixed_size = axes_pad
def get_axes_pad(self):
"get axes_pad"
return self._axes_pad
def set_aspect(self, aspect):
"set aspect"
self._divider.set_aspect(aspect)
def get_aspect(self):
"get aspect"
return self._divider.get_aspect()
def set_label_mode(self, mode):
"set label_mode"
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
elif mode == "L":
# left-most axes
for ax in self.axes_column[0][:-1]:
_tick_only(ax, bottom_on=True, left_on=False)
# lower-left axes
ax = self.axes_column[0][-1]
_tick_only(ax, bottom_on=False, left_on=False)
for col in self.axes_column[1:]:
# axes with no labels
for ax in col[:-1]:
_tick_only(ax, bottom_on=True, left_on=True)
# bottom
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
ax = self.axes_llc
_tick_only(ax, bottom_on=False, left_on=False)
def get_divider(self):
return self._divider
def set_axes_locator(self, locator):
self._divider.set_locator(locator)
def get_axes_locator(self):
return self._divider.get_locator()
def get_vsize_hsize(self):
return self._divider.get_vsize_hsize()
# from axes_size import AddList
# vsize = AddList(self._divider.get_vertical())
# hsize = AddList(self._divider.get_horizontal())
# return vsize, hsize
class ImageGrid(Grid):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that need to be
    displayed with a given aspect ratio. For example, displaying
    images of the same size with some fixed padding between them cannot
    be easily done in matplotlib. ImageGrid is used in such a case.
"""
_defaultCbarAxesClass = CbarAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids = None,
direction="row",
axes_pad = 0.02,
add_all=True,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Build an :class:`ImageGrid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
add_all True [ True | False ]
share_all False [ True | False ]
aspect True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
cbar_mode None [ "each" | "single" | "edge" ]
cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
cbar_pad None
cbar_size "5%"
cbar_set_cax True [ True | False ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
        *cbar_set_cax* : if True, each axes in the grid has a cax
          attribute that is bound to the associated cbar_axes.
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._axes_pad = axes_pad
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
if cbar_pad is None:
self._colorbar_pad = axes_pad
else:
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if isinstance(axes_class, maxes.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for i in range(self._ncols)]
self.axes_row = [[] for i in range(self._nrows)]
self.cbar_axes = []
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=aspect)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for i in range(self._ncols)]
self._row_refax = [None for i in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = self._refax
sharey = self._refax
else:
sharex = self._column_refax[col]
sharey = self._row_refax[row]
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
if share_all:
if self._refax is None:
self._refax = ax
else:
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
cax = self._defaultCbarAxesClass(fig, rect,
orientation=self._colorbar_location)
self.cbar_axes.append(cax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all+self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
self.set_label_mode(label_mode)
def _update_locators(self):
h = []
v = []
h_ax_pos = []
h_cb_pos = []
if self._colorbar_mode == "single" and self._colorbar_location in ('left', 'bottom'):
if self._colorbar_location == "left":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
elif self._colorbar_location == "bottom":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
for col,ax in enumerate(self._column_refax):
if h: h.append(self._horiz_pad_size) #Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesX(ax)
else:
sz = Size.AxesX(self.axes_llc)
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == 0)) and self._colorbar_location == "left":
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
h_ax_pos.append(len(h))
h.append(sz)
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == self._ncols - 1)) and self._colorbar_location == "right":
h.append(Size.from_any(self._colorbar_pad, sz))
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
v_ax_pos = []
v_cb_pos = []
for row,ax in enumerate(self._row_refax[::-1]):
            if v: v.append(self._vert_pad_size)  # Size.Fixed(self._axes_pad)
if ax:
sz = Size.AxesY(ax)
else:
sz = Size.AxesY(self.axes_llc)
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == 0)) and self._colorbar_location == "bottom":
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
v_ax_pos.append(len(v))
v.append(sz)
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == self._nrows - 1)) and self._colorbar_location == "top":
v.append(Size.from_any(self._colorbar_pad, sz))
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
for i in range(self.ngrids):
col, row = self._get_col_row(i)
#locator = self._divider.new_locator(nx=4*col, ny=2*(self._nrows - row - 1))
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows -1 - row])
self.axes_all[i].set_axes_locator(locator)
if self._colorbar_mode == "each":
if self._colorbar_location in ("right", "left"):
locator = self._divider.new_locator(nx=h_cb_pos[col],
ny=v_ax_pos[self._nrows -1 - row])
elif self._colorbar_location in ("top", "bottom"):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[self._nrows -1 - row])
self.cbar_axes[i].set_axes_locator(locator)
elif self._colorbar_mode == 'edge':
if ((self._colorbar_location == 'left' and col == 0) or
(self._colorbar_location == 'right' and col == self._ncols-1)):
locator = self._divider.new_locator(nx=h_cb_pos[0],
ny=v_ax_pos[self._nrows -1 - row])
self.cbar_axes[row].set_axes_locator(locator)
elif ((self._colorbar_location == 'bottom' and row == self._nrows - 1) or
(self._colorbar_location == 'top' and row == 0)):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[0])
self.cbar_axes[col].set_axes_locator(locator)
if self._colorbar_mode == "single":
if self._colorbar_location == "right":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_pad, sz))
h.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
elif self._colorbar_location == "top":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_pad, sz))
v.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
if self._colorbar_location in ("right", "top"):
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
elif self._colorbar_mode == "each":
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(True)
elif self._colorbar_mode == "edge":
if self._colorbar_location in ('right', 'left'):
count = self._nrows
else:
count = self._ncols
for i in range(count):
self.cbar_axes[i].set_visible(True)
for j in range(i + 1, self.ngrids):
self.cbar_axes[j].set_visible(False)
else:
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
which="active")
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
AxesGrid = ImageGrid
#if __name__ == "__main__":
if 0:
F = plt.figure(1, (7, 6))
F.clf()
F.subplots_adjust(left=0.15, right=0.9)
grid = Grid(F, 111, # similar to subplot(111)
nrows_ncols = (2, 2),
direction="row",
axes_pad = 0.05,
add_all=True,
label_mode = "1",
)
#if __name__ == "__main__":
if 0:
from axes_divider import get_demo_image
F = plt.figure(1, (9, 3.5))
F.clf()
F.subplots_adjust(left=0.05, right=0.98)
grid = ImageGrid(F, 131, # similar to subplot(111)
nrows_ncols = (2, 2),
direction="row",
axes_pad = 0.05,
add_all=True,
label_mode = "1",
)
Z, extent = get_demo_image()
plt.ioff()
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
# This only affects axes in first column and second row as share_all = False.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
plt.ion()
grid = ImageGrid(F, 132, # similar to subplot(111)
nrows_ncols = (2, 2),
direction="row",
axes_pad = 0.0,
add_all=True,
share_all=True,
label_mode = "1",
cbar_mode="single",
)
Z, extent = get_demo_image()
plt.ioff()
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
plt.colorbar(im, cax = grid.cbar_axes[0])
plt.setp(grid.cbar_axes[0].get_yticklabels(), visible=False)
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
plt.ion()
grid = ImageGrid(F, 133, # similar to subplot(122)
nrows_ncols = (2, 2),
direction="row",
axes_pad = 0.1,
add_all=True,
label_mode = "1",
share_all = True,
cbar_location="top",
cbar_mode="each",
cbar_size="7%",
cbar_pad="2%",
)
plt.ioff()
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
plt.colorbar(im, cax = grid.cbar_axes[i],
orientation="horizontal")
grid.cbar_axes[i].xaxis.set_ticks_position("top")
plt.setp(grid.cbar_axes[i].get_xticklabels(), visible=False)
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
plt.ion()
plt.draw()
| mit |
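# A minimal usage sketch for the ImageGrid class defined above; the demo blocks
# guarded by "if 0:" show richer variants. This stand-alone example assumes the
# class is importable from mpl_toolkits.axes_grid1, its usual public location.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid

fig = plt.figure(figsize=(4.0, 4.0))
grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=0.1)
data = np.arange(100).reshape(10, 10)
for ax in grid:
    ax.imshow(data, interpolation="nearest")
plt.show()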
piyueh/PetIBM | examples/ibpm/cylinder2dRe3000/scripts/plotVorticity.py | 6 | 1402 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 3000 time steps (3 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 3000
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot contour lines of the vorticity.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-56.0, 56.0, 28)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-0.6, 1.6)
ax.set_ylim(-0.8, 0.8)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
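# The script above reads the vorticity field wz directly from the PetIBM
# output. If only the velocity components were available, the out-of-plane
# vorticity could be approximated on a uniform grid with finite differences,
# e.g. via numpy.gradient as sketched below; the function and variable names
# are illustrative and are not part of the PetIBM example.
import numpy

def vorticity_z(u, v, dx, dy):
    """Approximate wz = dv/dx - du/dy for 2D fields stored as (ny, nx) arrays."""
    dvdx = numpy.gradient(v, dx, axis=1)
    dudy = numpy.gradient(u, dy, axis=0)
    return dvdx - dudy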
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of the interactive Python session
    Warning: does not work under Windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    # mainloop; if an fltk program already exists there is no need to call this
    # threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| gpl-3.0 |
mcleonard/seekwell | seekwell/export.py | 1 | 1691 | def to_ascii(rows, n=None):
from terminaltables import AsciiTable
if n is None:
n = rows.max_display_rows
table_data = [rows.headers]
for each in rows.rows[:n]:
table_data.append(each)
if len(table_data) < len(rows):
table_data.append(['...']*len(table_data[0]))
asciitable = AsciiTable(table_data)
asciitable.outer_border = False
return asciitable.table
def _html_table_row(row, header=False):
tag = 'th' if header else 'td'
row_items = ' '.join('<{tag}>{}</{tag}>'.format(each, tag=tag) for each in row)
return '<tr>{}</tr>'.format(row_items)
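# Illustrative sketch (not part of the original module): _html_table_row wraps
# each cell in <th> tags for a header row and <td> tags otherwise. The demo
# function name and the cell values below are made up for illustration.
def _demo_html_table_row():
    head = _html_table_row(['name', 'value'], header=True)
    body = _html_table_row(['alpha', 1])
    # head == '<tr><th>name</th> <th>value</th></tr>'
    # body == '<tr><td>alpha</td> <td>1</td></tr>'
    return head, body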
def to_html(rows, n=None):
if n is None:
n = rows.max_display_rows
headers = rows.headers
print_rows = rows.rows[:n]
more = _html_table_row(['...']*len(headers)) if n < len(rows.rows) else ''
html_rows = '\n'.join(_html_table_row(row) for row in print_rows)
html_table = '\n'.join(['<table style="font-size:10pt; white-space:nowrap;">',
_html_table_row(headers, header=True),
html_rows,
more,
'</table>'])
return html_table
def to_pandas(rows):
import pandas as pd
return pd.DataFrame(rows.rows, columns=rows.headers)
def to_csv(rows, file, **kwargs):
"""
    For the keyword arguments that control the delimiter, quoting, etc., see:
https://docs.python.org/3/library/csv.html#csv-fmt-params
"""
import csv
with open(file, 'w') as f:
csv_writer = csv.writer(f, **kwargs)
csv_writer.writerow(rows.headers)
csv_writer.writerows(rows.rows) | mit |
brightchen/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_iris_h2o_vs_sciKmeans.py | 5 | 1208 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import numpy as np
from sklearn.cluster import KMeans
def iris_h2o_vs_sciKmeans():
# Connect to a pre-existing cluster
# connect to localhost:54321
iris_h2o = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
iris_sci = np.genfromtxt(h2o.locate("smalldata/iris/iris.csv"), delimiter=',')
iris_sci = iris_sci[:,0:4]
s =[[4.9,3.0,1.4,0.2],
[5.6,2.5,3.9,1.1],
[6.5,3.0,5.2,2.0]]
start = h2o.H2OFrame(s)
h2o_km = h2o.kmeans(x=iris_h2o[0:4], k=3, user_points=start, standardize=False)
sci_km = KMeans(n_clusters=3, init=np.asarray(s), n_init=1)
sci_km.fit(iris_sci)
# Log.info("Cluster centers from H2O:")
print "Cluster centers from H2O:"
h2o_centers = h2o_km.centers()
print h2o_centers
# Log.info("Cluster centers from scikit:")
print "Cluster centers from scikit:"
sci_centers = sci_km.cluster_centers_.tolist()
print sci_centers
for hcenter, scenter in zip(h2o_centers, sci_centers):
for hpoint, spoint in zip(hcenter,scenter):
            assert abs(hpoint - spoint) < 1e-10, "expected centers to be the same"
if __name__ == "__main__":
tests.run_test(sys.argv, iris_h2o_vs_sciKmeans)
| apache-2.0 |
frank-tancf/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient so that the segmentation is close to a
# Voronoi partition of the graph.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
anetasie/sherpa | sherpa/astro/data.py | 1 | 137542 | #
# Copyright (C) 2008, 2015, 2016, 2017, 2018, 2019, 2020
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Classes for storing, inspecting, and manipulating astronomical data sets
"""
import os.path
import logging
import warnings
import numpy
from sherpa.data import Data1DInt, Data2D, Data, Data2DInt, Data1D, IntegratedDataSpace2D
from sherpa.models.regrid import EvaluationSpace1D
from sherpa.utils.err import DataErr, ImportErr
from sherpa.utils import SherpaFloat, pad_bounding_box, interpolate, \
create_expr, parse_expr, bool_cast, rebin, filter_bins
from sherpa.utils import formatting
# There are currently (Sep 2015) no tests that exercise the code that
# uses the compile_energy_grid symbols.
from sherpa.astro.utils import arf_fold, rmf_fold, filter_resp, \
compile_energy_grid, do_group, expand_grouped_mask
info = logging.getLogger(__name__).info
warning = logging.getLogger(__name__).warning
regstatus = False
try:
from sherpa.astro.utils._region import Region
regstatus = True
except ImportError:
warning('failed to import sherpa.astro.utils._region; Region routines ' +
'will not be available')
groupstatus = False
try:
import group as pygroup
groupstatus = True
except ImportError:
groupstatus = False
warning('the group module (from the CIAO tools package) is not ' +
'installed.\nDynamic grouping functions will not be available.')
__all__ = ('DataARF', 'DataRMF', 'DataPHA', 'DataIMG', 'DataIMGInt', 'DataRosatRMF')
def _notice_resp(chans, arf, rmf):
bin_mask = None
if rmf is not None and arf is not None:
bin_mask = rmf.notice(chans)
if len(rmf.energ_lo) == len(arf.energ_lo):
arf.notice(bin_mask)
# If the response is mis-matched, determine which energy bins in the
# RMF correspond to energy bins in the ARF and which are noticed.
        # Propagate the noticed RMF energy bins to the ARF energy bins.
elif len(rmf.energ_lo) < len(arf.energ_lo):
arf_mask = None
if bin_mask is not None:
arf_mask = numpy.zeros(len(arf.energ_lo), dtype=bool)
for ii, val in enumerate(bin_mask):
if val:
los = (rmf.energ_lo[ii],)
his = (rmf.energ_hi[ii],)
grid = (arf.energ_lo, arf.energ_hi)
idx = filter_bins(los, his, grid).nonzero()[0]
arf_mask[idx] = True
arf.notice(arf_mask)
else:
if rmf is not None:
bin_mask = rmf.notice(chans)
if arf is not None:
arf.notice(bin_mask)
def display_header(header, key):
"""Return the header value for display by _repr_html
The value is not displayed if it doesn't exist, is None,
is empty, or is the string 'NONE'. This is intended for
PHA responses.
Parameters
----------
header : dict-like
key : str
The key to display
Returns
-------
value : None or value
The value to display, or None.
Notes
-----
It is not clear if the Meta class is intended to only store
string values or not. Limited protection is provided in case
the value stored is not a string.
"""
try:
val = header[key]
except KeyError:
return None
# Unclear if this can happen
if val is None:
return None
# The metadata value is not guaranteed to be a string
try:
val = val.strip()
if val in ['', 'NONE']:
return None
except AttributeError:
pass
return val
def make_metadata(header, items):
"""Create the metadata table.
Parameters
----------
header : dict-like
The header. Expected to be a sherpa.astro.io.meta.Meta
object but just needs to act like a dictionary.
items : list of (str, str)
The keys to display (in order), if set. The first element
is the key name, and the second is the label in the header
to display.
Returns
-------
meta : list of (str, str) or None
The two-element table rows to display. If no rows matched
return None.
"""
meta = []
for key, desc in items:
val = display_header(header, key)
if val is None:
continue
meta.append((desc, val))
if len(meta) == 0:
return None
return meta
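# Illustrative sketch (not part of the original module): make_metadata picks
# out the requested keys via display_header, silently skipping anything
# missing, None, empty, or set to the string 'NONE'. The demo function name
# and the header values below are made up for illustration.
def _demo_make_metadata():
    hdr = {'TELESCOP': 'CHANDRA', 'INSTRUME': 'ACIS', 'FILTER': 'NONE'}
    # The FILTER entry is dropped because its value is 'NONE', leaving
    # [('Mission or Satellite', 'CHANDRA'), ('Instrument or Detector', 'ACIS')]
    return make_metadata(hdr, [('TELESCOP', 'Mission or Satellite'),
                               ('INSTRUME', 'Instrument or Detector'),
                               ('FILTER', 'Instrument filter')])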
def _extract_fields(obj, stop, summary, open_block=True):
"""Extract the fields up until the stop field.
Parameters
----------
obj : Data instance
It has to have a _fields attribute
stop : str
The attribute at which to stop (and is not included).
summary : str
The label for the details tab.
open_block : bool, optional
Is the details tab open or closed?
Returns
-------
html : str
The HTML for this section.
"""
meta = []
for f in obj._fields[1:]:
if f == stop:
break
v = getattr(obj, f)
if v is None:
continue
meta.append((f.upper(), v))
return formatting.html_section(meta, summary=summary,
open_block=open_block)
def html_pha(pha):
"""HTML representation: PHA"""
from sherpa.astro.plot import DataPHAPlot, backend
ls = []
plotter = DataPHAPlot()
plotter.prepare(pha)
try:
out = backend.as_html_plot(plotter, 'PHA Plot')
except AttributeError:
out = None
if out is None:
out = _extract_fields(pha, 'grouped', 'PHA Data')
ls.append(out)
# Summary properties
meta = []
if pha.name is not None and pha.name != '':
meta.append(('Identifier', pha.name))
if pha.exposure is not None:
meta.append(('Exposure', '{:g} s'.format(pha.exposure)))
meta.append(('Number of bins', len(pha.channel)))
meta.append(('Channel range', '{} - {}'.format(int(pha.channel[0]),
int(pha.channel[-1]))))
# Although assume the counts are integers, do not force this
cmin = pha.counts.min()
cmax = pha.counts.max()
meta.append(('Count range', '{} - {}'.format(cmin, cmax)))
if pha.background_ids != []:
if pha.subtracted:
msg = 'Subtracted'
else:
msg = 'Not subtracted'
meta.append(('Background', msg))
    # Make sure we show all groups (not just those that are within
# the filter applied to the object).
#
if pha.grouping is not None:
if pha.grouped:
ngrp = pha.apply_grouping(pha.counts).size
msg = 'Applied ({} groups)'.format(ngrp)
else:
msg = 'Not applied'
meta.append(('Grouping', msg))
# Should this only be displayed if a filter has been applied?
#
fexpr = pha.get_filter_expr()
bintype = 'groups' if pha.grouped else 'channels'
nbins = pha.get_dep(filter=True).size
meta.append(('Using', '{} with {} {}'.format(fexpr, nbins, bintype)))
ls.append(formatting.html_section(meta, summary='Summary',
open_block=True))
# TODO:
# correction factors
# Display a subset of header values
    # - maybe don't display the FILTER if NONE
# - how about RESPFILE / PHAFILE
if pha.header is not None:
meta = make_metadata(pha.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the PHA'),
('CHANTYPE', 'The channel type'),
('HDUCLAS2', 'Data stored'),
('HDUCLAS3', 'Data format'),
('HDUCLAS4', 'PHA format')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(pha, ls)
def _calc_erange(elo, ehi):
"""Create the energy range information.
Parameters
----------
elo, ehi - NumPy array
The low and high energy bins, in keV.
Returns
-------
erange : str
The string representation of the energy range
"""
# Have we guaranteed the ordering here or not? Assuming
# NumPy arrays.
e1 = elo[0]
e2 = ehi[-1]
emin, emax = (e1, e2) if e1 <= e2 else (e2, e1)
erange = '{:g} - {:g} keV'.format(emin, emax)
# Randomly pick 1% as the cut-off for a constant bin width
#
de = numpy.abs(ehi - elo)
demin = de.min()
demax = de.max()
if demin > 0.0:
dedelta = (demax - demin) / demin
else:
dedelta = 1
if dedelta <= 0.01:
erange += ', bin size {:g} keV'.format(demax)
else:
erange += ', bin size {:g} - {:g} keV'.format(demin, demax)
return erange
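# Illustrative sketch (not part of the original module): _calc_erange reports
# a single bin size when the grid is (nearly) uniform and a range otherwise.
# The demo function name and the energy grids below are made up.
def _demo_calc_erange():
    elo = numpy.array([0.1, 0.2, 0.3])
    ehi = numpy.array([0.2, 0.3, 0.4])
    uniform = _calc_erange(elo, ehi)
    # -> '0.1 - 0.4 keV, bin size 0.1 keV'
    elo2 = numpy.array([0.1, 0.3, 0.7])
    ehi2 = numpy.array([0.3, 0.7, 1.5])
    varying = _calc_erange(elo2, ehi2)
    # -> '0.1 - 1.5 keV, bin size 0.2 - 0.8 keV'
    return uniform, varying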
def _calc_wrange(wlo, whi):
"""Create the wavelength range information.
Parameters
----------
wlo, whi - NumPy array
The low and high wavelength bins, in Angstroms.
Returns
-------
wrange : str
The string representation of the wavelength range
"""
w1 = wlo[0]
w2 = whi[-1]
wmin, wmax = (w1, w2) if w1 <= w2 else (w2, w1)
wrange = '{:g} - {:g} Å'.format(wmin, wmax)
# Randomly pick 1% as the cut-off for a constant bin width
#
dw = numpy.abs(whi - wlo)
dwmin = dw.min()
dwmax = dw.max()
if dwmin > 0.0:
dwdelta = (dwmax - dwmin) / dwmin
else:
dwdelta = 1
if dwdelta <= 0.01:
wrange += ', bin size {:g} Å'.format(dwmax)
else:
wrange += ', bin size {:g} - {:g} Å'.format(dwmin, dwmax)
return wrange
def html_arf(arf):
"""HTML representation: ARF"""
# Unlike the string representation, this provides extra
# information (e.g. energy range covered). Should it include
# any filters or masks? How about bin_lo/hi values?
#
# It also assumes the units are keV/cm^2 which is not
# guaranteed.
from sherpa.astro.plot import ARFPlot, backend
ls = []
plotter = ARFPlot()
plotter.prepare(arf)
try:
out = backend.as_html_plot(plotter, 'ARF Plot')
except AttributeError:
out = None
if out is None:
out = _extract_fields(arf, 'exposure', 'ARF Data')
ls.append(out)
# Summary properties
meta = []
if arf.name is not None and arf.name != '':
meta.append(('Identifier', arf.name))
if arf.exposure is not None:
meta.append(('Exposure', '{:g} s'.format(arf.exposure)))
meta.append(('Number of bins', len(arf.specresp)))
erange = _calc_erange(arf.energ_lo, arf.energ_hi)
meta.append(('Energy range', erange))
# repeat for wavelengths (without the energy threshold)
#
if arf.bin_lo is not None and arf.bin_hi is not None:
wrange = _calc_wrange(arf.bin_lo, arf.bin_hi)
meta.append(('Wavelength range', wrange))
a1 = numpy.min(arf.specresp)
a2 = numpy.max(arf.specresp)
meta.append(('Area range', '{:g} - {:g} cm<sup>2</sup>'.format(a1, a2)))
ls.append(formatting.html_section(meta, summary='Summary',
open_block=True))
# Display a subset of header values
    # - maybe don't display the FILTER if NONE
# - how about RESPFILE / PHAFILE
if arf.header is not None:
meta = make_metadata(arf.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the ARF')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(arf, ls)
def html_rmf(rmf):
"""HTML representation: RMF"""
# See _html_arf for general comments
ls = []
svg = simulate_rmf_plot(rmf)
if svg is not None:
out = formatting.html_svg(svg, 'RMF Plot')
else:
out = _extract_fields(rmf, 'ethresh', 'RMF Data')
ls.append(out)
# Summary properties
meta = []
if rmf.name is not None and rmf.name != '':
meta.append(('Identifier', rmf.name))
meta.append(('Number of channels', rmf.detchans))
meta.append(('Number of energies', len(rmf.energ_hi)))
erange = _calc_erange(rmf.energ_lo, rmf.energ_hi)
if rmf.ethresh is not None and rmf.energ_lo[0] <= rmf.ethresh:
# Not entirely happy with the wording of this
erange += ' (minimum threshold of {} was used)'.format(rmf.ethresh)
meta.append(('Energy range', erange))
meta.append(('Channel range', '{} - {}'.format(int(rmf.offset),
int(rmf.offset + rmf.detchans - 1))))
# Could show the energy range as given by e_min/e_max but
# is this useful?
ls.append(formatting.html_section(meta, summary='Summary',
open_block=True))
# Display a subset of header values
# - how about PHAFILE
if rmf.header is not None:
meta = make_metadata(rmf.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the RMF'),
('CHANTYPE', 'The channel type'),
('LO_THRES', 'The minimum probability threshold'),
('HDUCLAS3', 'Matrix contents')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(rmf, ls)
def html_img(img):
"""HTML representation: IMG
Special-case of the Data2D handling. It would be nice to re-use
parts of the superclass behavior.
"""
ls = []
dtype = type(img).__name__
svg = img_plot(img)
if svg is not None:
out = formatting.html_svg(svg, '{} Plot'.format(dtype))
summary = ''
else:
# Only add prefix to summary if there's no plot
summary = '{} '.format(dtype)
# Summary properties
#
meta = []
if img.name is not None and img.name != '':
meta.append(('Identifier', img.name))
# shape is better defined for DataIMG than Data2D
meta.append(('Shape',
('{1} by {0} pixels'.format(*img.shape))))
meta.append(('Number of bins', len(img.y)))
# Rely on the _fields ordering, ending at shape
for f in img._fields[1:]:
if f == 'shape':
break
meta.append((f.upper(), getattr(img, f)))
if img.staterror is not None:
meta.append(('Statistical error', img.staterror))
if img.syserror is not None:
meta.append(('Systematic error', img.syserror))
out = formatting.html_section(meta, summary=summary + 'Data',
open_block=True)
ls.append(out)
# Add coordinate-system information. The WCS structure in Sherpa
# is not really sufficient to identify the transform.
#
if img.sky is not None:
meta = []
meta.append(('Center pixel (logical)', img.sky.crpix))
meta.append(('Center pixel (physical)', img.sky.crval))
meta.append(('Pixel size', img.sky.cdelt))
ls.append(formatting.html_section(meta,
summary='Coordinates: {}'.format(img.sky.name)))
if img.eqpos is not None:
meta = []
meta.append(('Center pixel (physical)', img.eqpos.crpix))
# could convert to RA/Dec
meta.append(('Center pixel (world)', img.eqpos.crval))
meta.append(('Pixel size', img.eqpos.cdelt))
meta.append(('Rotation', img.eqpos.crota))
meta.append(('Epoch', img.eqpos.epoch))
meta.append(('Equinox', img.eqpos.equinox))
ls.append(formatting.html_section(meta,
summary='Coordinates: {}'.format(img.eqpos.name)))
if img.header is not None:
meta = make_metadata(img.header,
[('TELESCOP', 'Mission or Satellite'),
('INSTRUME', 'Instrument or Detector'),
('FILTER', 'Instrument filter'),
('OBJECT', 'Object'),
('TITLE', 'Program description'),
('OBSERVER', 'Observer'),
('EXPOSURE', 'Exposure time'),
('DATE-OBS', 'Observation date'),
('CREATOR', 'Program that created the image')])
if meta is not None:
ls.append(formatting.html_section(meta, summary='Metadata'))
return formatting.html_from_sections(img, ls)
def simulate_rmf_plot(rmf):
"""Create a plot which shows the response to monochromatic energies.
The SVG of the plot is returned if matplotlib is selected as the
backend. The choice of energies used to create the response to
monochromatic energies is based on the data range (using log
scaling).
"""
from sherpa.models.basic import Delta1D
from sherpa.plot import backend
try:
from matplotlib import pyplot as plt
except ImportError:
return None
    # X axis
#
if rmf.e_min is None:
x = numpy.arange(rmf.offset, rmf.detchans + rmf.offset)
xlabel = 'Channel'
else:
x = 0.5 * (rmf.e_min + rmf.e_max)
xlabel = 'Energy (keV)'
# How many monochromatic lines to use
#
nlines = 5
# for now let's just create log-spaced energies
#
elo, ehi = rmf.energ_lo, rmf.energ_hi
l1 = numpy.log10(elo[0])
l2 = numpy.log10(ehi[-1])
dl = (l2 - l1) / (nlines + 1)
lines = l1 + dl * numpy.arange(1, nlines + 1)
energies = numpy.power(10, lines)
mdl = Delta1D()
def plotfunc():
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
for energy in energies:
mdl.pos = energy
y = rmf.apply_rmf(mdl(elo, ehi))
ax.plot(x, y, label='{:.2g} keV'.format(energy))
# Try to get the legend centered nicely below the plot
fig.legend(loc='center', ncol=nlines, bbox_to_anchor=(0.0, 0, 1, 0.1))
ax.set_xlabel(xlabel)
ax.set_title(rmf.name)
ax.set_xscale('log')
ax.set_yscale('log')
return fig
try:
return backend.as_svg(plotfunc)
except AttributeError:
return None
def img_plot(img):
"""Display the image.
The SVG of the plot is returned if matplotlib is selected as the
backend.
The eqpos/wcs coordinate system is not used; it uses physical
instead. This greatly simplifies the plot (no need to handle WCS).
"""
from sherpa.plot import backend
try:
from matplotlib import pyplot as plt
except ImportError:
return None
# Apply filter and coordinate system
#
y = img.get_img()
# extent is left, right, bottom, top and describes the
# outer-edge of the pixels.
#
ny, nx = img.shape
coord = img.coord
if coord in ['physical', 'world']:
x0, y0 = img._logical_to_physical(0.5, 0.5)
x1, y1 = img._logical_to_physical(nx + 0.5, ny + 0.5)
extent = (x0, x1, y0, y1)
lbl = 'physical'
cdelt = img.sky.cdelt
aspect = 'equal' if cdelt[1] == cdelt[0] else 'auto'
else:
extent = (0.5, nx + 0.5, 0.5, ny + 0.5)
aspect = 'equal'
lbl = 'logical'
# What is the filtered dataset?
#
if img.get_filter_expr() != '':
x0, x1 = img.get_indep(filter=True)
x0min, x0max = numpy.min(x0), numpy.max(x0)
x1min, x1max = numpy.min(x1), numpy.max(x1)
        # Should add in half cdelt to pad these, but
# it looks like it isn't necessary.
filtered = (x0min, x1min, x0max, x1max)
else:
filtered = None
def plotfunc():
fig, ax = plt.subplots()
im = ax.imshow(y, origin='lower', extent=extent, aspect=aspect)
fig.colorbar(im, ax=ax)
        if filtered is not None:
ax.set_xlim(filtered[0], filtered[2])
ax.set_ylim(filtered[1], filtered[3])
ax.set_xlabel('X ({})'.format(lbl))
ax.set_ylabel('Y ({})'.format(lbl))
if img.name is not None and img.name != '':
ax.set_title(img.name)
return fig
try:
return backend.as_svg(plotfunc)
except AttributeError:
return None
class DataOgipResponse(Data1DInt):
"""
Parent class for OGIP responses, in particular ARF and RMF. This class implements some common validation code that
inheriting classes can call in their initializers.
Inheriting classes should override the protected class field `_ui_name` to provide a more specific label for user
messages.
"""
_ui_name = "OGIP Response"
# FIXME For a future time when we'll review this code in a deeper way: we
# could have better separation of concerns if the initializers of `DataARF`
# and `DataRMF` did not rely on the `Data` initializer, and if the
# class hierarchy was better organized (e.g. it looks like children must
    # not call their super's initializer). Also, I'd expect validation to
# happen in individual methods rather than in a large one, and nested ifs
# should be avoided if possible.
#
# The shift to creating a warning message instead of raising an
# error has made this messier.
#
def _validate_energy_ranges(self, label, elo, ehi, ethresh):
"""Check the lo/hi values are > 0, handling common error case.
Several checks are made, to make sure the parameters follow
the OGIP standard. At present a failed check can result in
either a warning message being logged, or an error raised.
It was felt that raising an error in all cases would not be
helpful to a user, who can't (easily) change the response
files.
Parameters
----------
label : str
The response file identifier.
elo, ehi : numpy.ndarray
The input ENERG_LO and ENERG_HI arrays. They are assumed
to be one-dimensional and have the same number of elements.
ethresh : None or float, optional
If None, then elo must be greater than 0. When set, the
start bin can have a low-energy edge of 0; it is replaced
by ethresh. If set, ethresh must be greater than 0.
An error is raised if ethresh is larger than the upper-edge
of the first bin (only if the lower edge has been replaced).
Returns
-------
elo, ehi : numpy arrays
The validated energy limits. These can be the input arrays
or a copy of them. At present the ehi array is the same as
the input array, but this may change in the future.
Notes
-----
Only some of the constraints provided by the OGIP standard are
checked here, since there are issues involving numerical effects
(e.g. when checking that two bins do not overlap), as well as
uncertainty over what possible behavior is seen in released
data products for missions. The current set of checks are:
- ehi > elo for each bin
- elo is monotonic (ascending or descending)
- when emin is set, the lowest value in elo is >= 0,
otherwise it is > 0.
- ethresh (if set) is less than the minimum value in ENERG_HI
"""
rtype = self._ui_name
if elo.size != ehi.size:
raise ValueError("The energy arrays must have the same size, not {} and {}" .format(elo.size, ehi.size))
if ethresh is not None and ethresh <= 0.0:
raise ValueError("ethresh is None or > 0")
if (elo >= ehi).any():
# raise DataErr('ogip-error', rtype, label,
# 'has at least one bin with ENERG_HI < ENERG_LO')
wmsg = "The {} '{}' ".format(rtype, label) + \
'has at least one bin with ENERG_HI < ENERG_LO'
warnings.warn(wmsg)
# if elo is monotonically increasing, all elements will be True
# decreasing, False
#
# so the sum will be number of elements or 0
#
increasing = numpy.diff(elo, n=1) > 0.0
nincreasing = increasing.sum()
if nincreasing > 0 and nincreasing != len(increasing):
# raise DataErr('ogip-error', rtype, label,
# 'has a non-monotonic ENERG_LO array')
wmsg = "The {} '{}' ".format(rtype, label) + \
'has a non-monotonic ENERG_LO array'
warnings.warn(wmsg)
if nincreasing == 0:
startidx = -1
else:
startidx = 0
e0 = elo[startidx]
if ethresh is None:
if e0 <= 0.0:
raise DataErr('ogip-error', rtype, label,
'has an ENERG_LO value <= 0')
else:
# TODO: should this equality be replaced by an approximation test?
if e0 == 0.0:
if ehi[startidx] <= ethresh:
raise DataErr('ogip-error', rtype, label,
'has an ENERG_HI value <= the replacement ' +
'value of {}'.format(ethresh))
elo = elo.copy()
elo[startidx] = ethresh
wmsg = "The minimum ENERG_LO in the " + \
"{} '{}' was 0 and has been ".format(rtype, label) + \
"replaced by {}".format(ethresh)
warnings.warn(wmsg)
elif e0 < 0.0:
# raise DataErr('ogip-error', rtype, label,
# 'has an ENERG_LO value < 0')
wmsg = "The {} '{}' ".format(rtype, label) + \
'has an ENERG_LO value < 0'
warnings.warn(wmsg)
return elo, ehi
def _get_data_space(self, filter=False):
return EvaluationSpace1D(self._lo, self._hi)
class DataARF(DataOgipResponse):
"""ARF data set.
The ARF format is described in OGIP documents [1]_ and [2]_.
Parameters
----------
name : str
The name of the data set; often set to the name of the file
containing the data.
energ_lo, energ_hi, specresp : numpy.ndarray
The values of the ENERG_LO, ENERG_HI, and SPECRESP columns
for the ARF. The ENERG_HI values must be greater than the
ENERG_LO values for each bin, and the energy arrays must be
in increasing or decreasing order.
bin_lo, bin_hi : array or None, optional
exposure : number or None, optional
The exposure time for the ARF, in seconds.
header : dict or None, optional
ethresh : number or None, optional
If set it must be greater than 0 and is the replacement value
to use if the lowest-energy value is 0.0.
Raises
------
sherpa.utils.err.DataErr
This is raised if the energy arrays do not follow some of the
OGIP standards.
Notes
-----
There is limited checking that the ARF matches the OGIP standard,
but as there are cases of released data products that do not follow
the standard, these checks can not cover all cases.
References
----------
.. [1] "The Calibration Requirements for Spectral Analysis (Definition of RMF and ARF file formats)", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html
.. [2] "The Calibration Requirements for Spectral Analysis Addendum: Changes log", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002a/cal_gen_92_002a.html
"""
_ui_name = "ARF"
_fields = ("name", "energ_lo", "energ_hi", "specresp", "bin_lo", "bin_hi", "exposure", "ethresh")
def _get_specresp(self):
return self._specresp
def _set_specresp(self, val):
self._specresp = val
self._rsp = val
specresp = property(_get_specresp, _set_specresp)
def __init__(self, name, energ_lo, energ_hi, specresp, bin_lo=None,
bin_hi=None, exposure=None, header=None, ethresh=None):
self.specresp = specresp
self.bin_lo = bin_lo
self.bin_hi = bin_hi
self.exposure = exposure
self.header = header
self.ethresh = ethresh
energ_lo, energ_hi = self._validate_energy_ranges(name, energ_lo, energ_hi, ethresh)
self._lo, self._hi = energ_lo, energ_hi
self.energ_lo = energ_lo
self.energ_hi = energ_hi
Data1DInt.__init__(self, name, energ_lo, energ_hi, specresp)
def __str__(self):
# Print the metadata first
try:
ss = Data.__str__(self)
        except Exception:
ss = self._fields
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the ARF
"""
return html_arf(self)
def __setstate__(self, state):
if 'header' not in state:
self.header = None
self.__dict__.update(state)
if '_specresp' not in state:
self.__dict__['_specresp'] = state.get('specresp', None)
self.__dict__['_rsp'] = state.get('specresp', None)
def apply_arf(self, src, *args, **kwargs):
"Fold the source array src through the ARF and return the result"
# an external function must be called so all ARFs go through
# a single entry point in order for caching to 'work'
model = arf_fold(src, self._rsp)
# Rebin the high-res source model folded through ARF down to the size
# the PHA or RMF expects.
if args != ():
(arf, rmf) = args
if rmf != () and len(arf[0]) > len(rmf[0]):
model = rebin(model, arf[0], arf[1], rmf[0], rmf[1])
return model
def notice(self, bin_mask=None):
self._rsp = self.specresp
self._lo = self.energ_lo
self._hi = self.energ_hi
if bin_mask is not None:
self._rsp = self.specresp[bin_mask]
self._lo = self.energ_lo[bin_mask]
self._hi = self.energ_hi[bin_mask]
def get_indep(self, filter=False):
return (self._lo, self._hi)
def get_dep(self, filter=False):
return self._rsp
def get_xlabel(self):
return 'Energy (keV)'
def get_ylabel(self):
from sherpa.plot import backend
return 'cm' + backend.get_latex_for_string('^2')
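# Illustrative sketch (not part of the original module): build a small DataARF
# from made-up arrays. Folding a flat source array through it essentially
# weights the array by the effective-area curve stored in SPECRESP. The demo
# function name and all values below are invented for illustration.
def _demo_dataarf():
    elo = numpy.array([0.1, 0.2, 0.3])
    ehi = numpy.array([0.2, 0.3, 0.4])
    specresp = numpy.array([20.0, 50.0, 45.0])   # effective area in cm^2
    arf = DataARF('demo.arf', elo, ehi, specresp, exposure=10000.0)
    return arf.apply_arf(numpy.ones(3))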
class DataRMF(DataOgipResponse):
"""RMF data set.
The RMF format is described in OGIP documents [1]_ and [2]_.
Parameters
----------
name : str
The name of the data set; often set to the name of the file
containing the data.
detchans : int
energ_lo, energ_hi : array
        The values of the ENERG_LO and ENERG_HI columns for the
        RMF. The ENERG_HI values must be greater than the
ENERG_LO values for each bin, and the energy arrays must be
in increasing or decreasing order.
n_grp, f_chan, n_chan, matrix : array-like
offset : int, optional
e_min, e_max : array-like or None, optional
header : dict or None, optional
ethresh : number or None, optional
If set it must be greater than 0 and is the replacement value
to use if the lowest-energy value is 0.0.
Notes
-----
There is limited checking that the RMF matches the OGIP standard,
but as there are cases of released data products that do not follow
the standard, these checks can not cover all cases. If a check fails
then a warning message is logged.
References
----------
.. [1] "The Calibration Requirements for Spectral Analysis (Definition of RMF and ARF file formats)", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002/cal_gen_92_002.html
.. [2] "The Calibration Requirements for Spectral Analysis Addendum: Changes log", https://heasarc.gsfc.nasa.gov/docs/heasarc/caldb/docs/memos/cal_gen_92_002a/cal_gen_92_002a.html
"""
_ui_name = "RMF"
_fields = ("name", "detchans", "energ_lo", "energ_hi", "n_grp", "f_chan", "n_chan", "matrix", "offset", "e_min",
"e_max", "ethresh")
def __init__(self, name, detchans, energ_lo, energ_hi, n_grp, f_chan,
n_chan, matrix, offset=1, e_min=None, e_max=None,
header=None, ethresh=None):
energ_lo, energ_hi = self._validate(name, energ_lo, energ_hi, ethresh)
if offset < 0:
raise ValueError("offset must be >=0, not {}".format(offset))
self.energ_lo = energ_lo
self.energ_hi = energ_hi
self.offset = offset
self.detchans = detchans
self.e_min = e_min
self.e_max = e_max
self.header = header
self.n_grp = n_grp
self.f_chan = f_chan
self.n_chan = n_chan
self.matrix = matrix
self.ethresh = ethresh
self._fch = f_chan
self._nch = n_chan
self._grp = n_grp
self._rsp = matrix
self._lo = energ_lo
self._hi = energ_hi
Data1DInt.__init__(self, name, energ_lo, energ_hi, matrix)
def __str__(self):
# Print the metadata first
old = self._fields
ss = old
try:
self._fields = tuple(filter((lambda x: x != 'header'),
self._fields))
ss = Data.__str__(self)
finally:
self._fields = old
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the RMF
"""
return html_rmf(self)
def __setstate__(self, state):
if 'header' not in state:
self.header = None
self.__dict__.update(state)
def _validate(self, name, energy_lo, energy_hi, ethresh):
"""
Validate energy ranges and, if necessary, make adjustments.
Subclasses may override this method to perform different validations
or skip validation altogether.
Parameters
----------
name : str
The name/label of the current file
        energy_lo, energy_hi : NumPy array
            The lower and upper bounds of the energy bins. The arrays must have the same size
ethresh : float
The lowest energy value
Returns
-------
        energy_lo, energy_hi : NumPy array
The energy values to use for the bin boundaries
"""
return self._validate_energy_ranges(name, energy_lo, energy_hi, ethresh)
def apply_rmf(self, src, *args, **kwargs):
"Fold the source array src through the RMF and return the result"
# Rebin the high-res source model from the PHA down to the size
# the RMF expects.
if args != ():
(rmf, pha) = args
if pha != () and len(pha[0]) > len(rmf[0]):
src = rebin(src, pha[0], pha[1], rmf[0], rmf[1])
if len(src) != len(self._lo):
raise TypeError("Mismatched filter between ARF and RMF " +
"or PHA and RMF")
return rmf_fold(src, self._grp, self._fch, self._nch, self._rsp,
self.detchans, self.offset)
def notice(self, noticed_chans=None):
bin_mask = None
self._fch = self.f_chan
self._nch = self.n_chan
self._grp = self.n_grp
self._rsp = self.matrix
self._lo = self.energ_lo
self._hi = self.energ_hi
if noticed_chans is not None:
(self._grp, self._fch, self._nch, self._rsp,
bin_mask) = filter_resp(noticed_chans, self.n_grp, self.f_chan,
self.n_chan, self.matrix, self.offset)
self._lo = self.energ_lo[bin_mask]
self._hi = self.energ_hi[bin_mask]
return bin_mask
def get_indep(self, filter=False):
return (self._lo, self._hi)
def get_dep(self, filter=False):
return self.apply_rmf(numpy.ones(self.energ_lo.shape, SherpaFloat))
def get_xlabel(self):
if (self.e_min is not None) and (self.e_max is not None):
return 'Energy (keV)'
return 'Channel'
def get_ylabel(self):
return 'Counts'
# FIXME There are places in the code that explicitly check if an object is an instance of sherpa.astro.data.DataRMF.
# So it's safer to make DataRosatRMF a subclass of the default class, although in principle they should be siblings
# and subclasses of the same superclass.
class DataRosatRMF(DataRMF):
    _ui_name = "ROSAT RMF"
def _validate(self, name, energy_lo, energy_hi, ethresh):
return energy_lo, energy_hi
class DataPHA(Data1D):
"""PHA data set, including any associated instrument and background data.
The PHA format is described in an OGIP document [1]_.
Parameters
----------
name : str
The name of the data set; often set to the name of the file
containing the data.
channel, counts : array of int
The PHA data.
staterror, syserror : scalar or array or None, optional
The statistical and systematic errors for the data, if
defined.
bin_lo, bin_hi : array or None, optional
grouping : array of int or None, optional
quality : array of int or None, optional
exposure : number or None, optional
The exposure time for the PHA data set, in seconds.
backscal : scalar or array or None, optional
areascal : scalar or array or None, optional
header : dict or None, optional
Attributes
----------
name : str
Used to store the file name, for data read from a file.
channel
counts
staterror
syserror
bin_lo
bin_hi
grouping
quality
exposure
backscal
areascal
Notes
-----
The original data is stored in the attributes - e.g. `counts` - and
the data-access methods, such as `get_dep` and `get_staterror`,
provide any necessary data manipulation to handle cases such as:
background subtraction, filtering, and grouping.
    The handling of the AREASCAL value - whether it is a scalar or
array - is currently in flux. It is a value that is stored with the
PHA file, and the OGIP PHA standard ([1]_) describes the observed
counts being divided by the area scaling before comparison to the
model. However, this is not valid for Poisson-based statistics, and
is also not how XSPEC handles AREASCAL ([2]_); the AREASCAL values
are used to scale the exposure times instead. The aim is to add
this logic to the instrument models in `sherpa.astro.instrument`,
such as `sherpa.astro.instrument.RMFModelPHA`. The area scaling still
has to be applied when calculating the background contribution to
a spectrum, as well as when calculating the data and model values used
for plots (following XSPEC so as to avoid sharp discontinuities where
the area-scaling factor changes strongly).
References
----------
.. [1] "The OGIP Spectral File Format", https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
.. [2] Private communication with Keith Arnaud
"""
_fields = ("name", "channel", "counts", "bin_lo", "bin_hi", "grouping", "quality",
"exposure", "backscal", "areascal")
def _get_grouped(self):
return self._grouped
def _set_grouped(self, val):
val = bool(val)
if val and self.grouping is None:
raise DataErr('nogrouping', self.name)
if self._grouped == val:
return
# As the grouping status is being changed, we need to reset the mask
# to be correct size, while still noticing groups within the filter
#
if numpy.iterable(self.mask):
old_filter = self.get_filter(group=val)
self._grouped = val
self.ignore()
for vals in parse_expr(old_filter):
self.notice(*vals)
self._grouped = val
grouped = property(_get_grouped, _set_grouped,
doc='Are the data grouped?')
def _get_subtracted(self):
return self._subtracted
def _set_subtracted(self, val):
val = bool(val)
if len(self._backgrounds) == 0:
raise DataErr('nobkg', self.name)
self._subtracted = val
subtracted = property(_get_subtracted, _set_subtracted,
doc='Are the background data subtracted?')
def _get_units(self):
return self._units
def _set_units(self, val):
units = str(val).strip().lower()
if units == 'bin':
units = 'channel'
if units.startswith('chan'):
# Note: the names of these routines appear confusing because of the
# way group values are used
self._to_channel = self._channel_to_group
self._from_channel = self._group_to_channel
units = 'channel'
elif units.startswith('ener'):
self._to_channel = self._energy_to_channel
self._from_channel = self._channel_to_energy
units = 'energy'
elif units.startswith('wave'):
self._to_channel = self._wavelength_to_channel
self._from_channel = self._channel_to_wavelength
units = 'wavelength'
else:
raise DataErr('bad', 'quantity', val)
for id in self.background_ids:
bkg = self.get_background(id)
if bkg.get_response() != (None, None) or \
(bkg.bin_lo is not None and bkg.bin_hi is not None):
bkg.units = units
self._units = units
units = property(_get_units, _set_units,
doc='Units of the independent axis')
def _get_rate(self):
return self._rate
def _set_rate(self, val):
self._rate = bool_cast(val)
for id in self.background_ids:
# TODO: shouldn't this store bool_cast(val) instead?
self.get_background(id).rate = val
rate = property(_get_rate, _set_rate,
doc='Quantity of y-axis: counts or counts/sec')
def _get_plot_fac(self):
return self._plot_fac
def _set_plot_fac(self, val):
self._plot_fac = int(val)
for id in self.background_ids:
self.get_background(id).plot_fac = val
plot_fac = property(_get_plot_fac, _set_plot_fac,
doc='Number of times to multiply the y-axis ' +
'quantity by x-axis bin size')
def _get_response_ids(self):
return self._response_ids
def _set_response_ids(self, ids):
if not numpy.iterable(ids):
raise DataErr('idsnotarray', 'response', str(ids))
keys = self._responses.keys()
for id in ids:
if id not in keys:
raise DataErr('badids', str(id), 'response', str(keys))
ids = list(ids)
self._response_ids = ids
response_ids = property(_get_response_ids, _set_response_ids,
doc=('IDs of defined instrument responses ' +
'(ARF/RMF pairs)'))
def _get_background_ids(self):
return self._background_ids
def _set_background_ids(self, ids):
if not numpy.iterable(ids):
raise DataErr('idsnotarray', 'background', str(ids))
keys = self._backgrounds.keys()
for id in ids:
if id not in keys:
raise DataErr('badids', str(id), 'background', str(keys))
ids = list(ids)
self._background_ids = ids
background_ids = property(_get_background_ids, _set_background_ids,
doc='IDs of defined background data sets')
_fields = ('name', 'channel', 'counts', 'staterror', 'syserror', 'bin_lo', 'bin_hi', 'grouping', 'quality',
'exposure', 'backscal', 'areascal', 'grouped', 'subtracted', 'units', 'rate', 'plot_fac', 'response_ids',
'background_ids')
def __init__(self, name, channel, counts, staterror=None, syserror=None,
bin_lo=None, bin_hi=None, grouping=None, quality=None,
exposure=None, backscal=None, areascal=None, header=None):
self.channel = channel
self.counts = counts
self.bin_lo = bin_lo
self.bin_hi = bin_hi
self.quality = quality
self.grouping = grouping
self.exposure = exposure
self.backscal = backscal
self.areascal = areascal
self.header = header
self._grouped = (grouping is not None)
self._original_groups = True
self._subtracted = False
self._response_ids = []
self._background_ids = []
self._responses = {}
self._backgrounds = {}
self._rate = True
self._plot_fac = 0
self.units = 'channel'
self.quality_filter = None
Data1D.__init__(self, name, channel, counts, staterror, syserror)
def __str__(self):
# Print the metadata first
old = self._fields
ss = old
try:
self._fields = tuple(filter((lambda x: x != 'header'),
self._fields))
ss = Data.__str__(self)
finally:
self._fields = old
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the PHA
"""
return html_pha(self)
def __getstate__(self):
state = self.__dict__.copy()
del state['_to_channel']
del state['_from_channel']
return state
def __setstate__(self, state):
self._background_ids = state['_background_ids']
self._backgrounds = state['_backgrounds']
self._set_units(state['_units'])
if 'header' not in state:
self.header = None
self.__dict__.update(state)
primary_response_id = 1
"""The identifier for the response component when not set."""
def set_analysis(self, quantity, type='rate', factor=0):
"""Return the units used when fitting spectral data.
Parameters
----------
quantity : {'channel', 'energy', 'wavelength'}
The analysis setting.
type : {'rate', 'counts'}, optional
Do plots display a rate or show counts?
factor : int, optional
The Y axis of plots is multiplied by Energy^factor or
Wavelength^factor before display. The default is 0.
Raises
------
sherpa.utils.err.DataErr
If the type argument is invalid, the RMF or ARF has the
wrong size, or there is no response.
See Also
--------
get_analysis
Examples
--------
>>> pha.set_analysis('energy')
>>> pha.set_analysis('wave', type='counts', factor=1)
"""
self.plot_fac = factor
type = str(type).strip().lower()
if not (type.startswith('counts') or type.startswith('rate')):
raise DataErr("plottype", type, "'rate' or 'counts'")
self.rate = (type == 'rate')
arf, rmf = self.get_response()
if rmf is not None and rmf.detchans != len(self.channel):
raise DataErr("incompatibleresp", rmf.name, self.name)
if (rmf is None and arf is None) and \
(self.bin_lo is None and self.bin_hi is None) and \
quantity != 'channel':
raise DataErr('norsp', self.name)
if rmf is None and arf is not None and quantity != 'channel' and \
len(arf.energ_lo) != len(self.channel):
raise DataErr("incompleteresp", self.name)
self.units = quantity
def get_analysis(self):
"""Return the units used when fitting spectral data.
Returns
-------
setting : { 'channel', 'energy', 'wavelength' }
The analysis setting.
Raises
------
sherpa.utils.err.ArgumentErr
If the data set does not contain PHA data.
sherpa.utils.err.IdentifierErr
If the `id` argument is not recognized.
See Also
--------
set_analysis
Examples
--------
>>> is_wave = pha.get_analysis() == 'wavelength'
"""
return self.units
def _fix_response_id(self, id):
if id is None:
id = self.primary_response_id
return id
def get_response(self, id=None):
"""Return the response component.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
Returns
-------
arf, rmf : sherpa.astro.data.DataARF, sherpa.astro.data.DataRMF instances or None
The response, as an ARF and RMF. Either, or both,
components can be None.
See Also
--------
delete_response, get_arf, get_rmf, set_response
"""
id = self._fix_response_id(id)
return self._responses.get(id, (None, None))
def set_response(self, arf=None, rmf=None, id=None):
"""Add or replace a response component.
To remove a response use delete_response(), as setting arf and
rmf to None here does nothing.
Parameters
----------
arf : sherpa.astro.data.DataARF instance or None, optional
The ARF to add if any.
rmf : sherpa.astro.data.DataRMF instance or None, optional
The RMF to add, if any.
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
delete_response, get_response, set_arf, set_rmf
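Examples
--------
A minimal sketch; here ``arf`` and ``rmf`` are assumed to be
existing `DataARF` and `DataRMF` instances (for example read in
from file):
>>> pha.set_response(arf, rmf)
Only one component needs to be given, so an ARF-only response
could be added with:
>>> pha.set_response(arf=arf)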
"""
if (arf is None) and (rmf is None):
return
id = self._fix_response_id(id)
self._responses[id] = (arf, rmf)
ids = self.response_ids[:]
if id not in ids:
ids.append(id)
self.response_ids = ids
def delete_response(self, id=None):
"""Remove the response component.
If the response component does not exist then the method
does nothing.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
set_response
"""
id = self._fix_response_id(id)
self._responses.pop(id, None)
ids = self.response_ids[:]
ids.remove(id)
self.response_ids = ids
def get_arf(self, id=None):
"""Return the ARF from the response.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
Returns
-------
arf: sherpa.astro.data.DataARF instance or None
The ARF, if set.
See Also
--------
get_response, get_rmf
"""
return self.get_response(id)[0]
def get_rmf(self, id=None):
"""Return the RMF from the response.
Parameters
----------
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
Returns
-------
rmf: sherpa.astro.data.DataRMF instance or None
The RMF, if set.
See Also
--------
get_arf, get_response
"""
return self.get_response(id)[1]
def set_arf(self, arf, id=None):
"""Add or replace the ARF in a response component.
This replaces the existing ARF of the response, keeping the
previous RMF (if set). Use the delete_response method to
remove the response, rather than setting arf to None.
Parameters
----------
arf : sherpa.astro.data.DataARF instance
The ARF to add.
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
delete_response, set_response, set_rmf
"""
self.set_response(arf, self.get_rmf(id), id)
def set_rmf(self, rmf, id=None):
"""Add or replace the RMF in a response component.
This replaces the existing RMF of the response, keeping the
previous ARF (if set). Use the delete_response method to
remove the response, rather than setting rmf to None.
Parameters
----------
rmf : sherpa.astro.data.DataRMF instance
The RMF to add.
id : int or str, optional
The identifier of the response component. If it is None
then the default response identifier is used.
See Also
--------
delete_response, set_response, set_arf
"""
self.set_response(self.get_arf(id), rmf, id)
def get_specresp(self, filter=False):
"""Return the effective area values for the data set.
Parameters
----------
filter : bool, optional
Should the filter attached to the data set be applied to
the ARF or not. The default is `False`.
Returns
-------
arf : array
The effective area values for the data set (or background
component).
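Examples
--------
An illustrative sketch, assuming a response (ARF and RMF) has
already been associated with the dataset:
>>> specresp = pha.get_specresp()
>>> filtered = pha.get_specresp(filter=True)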
"""
filter = bool_cast(filter)
self.notice_response(False)
arf, rmf = self.get_response()
newarf = None
if arf is not None and rmf is not None:
specresp = arf.get_dep()
elo, ehi = arf.get_indep()
lo, hi = self._get_ebins(group=False)
newarf = interpolate(lo, elo, specresp)
newarf[newarf <= 0] = 1.
if filter:
newarf = self.apply_filter(newarf, self._middle)
return newarf
# The energy bins can be grouped or ungrouped. By default,
# they should be grouped if the data are grouped. There are
# certain contexts (e.g., plotting) where we will retrieve the
# energy bins, and later filter the data; but filtering
# is automatically followed by grouping. Grouping the data
# twice is an error.
def _get_ebins(self, response_id=None, group=True):
group = bool_cast(group)
arf, rmf = self.get_response(response_id)
if (self.bin_lo is not None) and (self.bin_hi is not None):
elo = self.bin_lo
ehi = self.bin_hi
if (elo[0] > elo[-1]) and (ehi[0] > ehi[-1]):
elo = self._hc / self.bin_hi
ehi = self._hc / self.bin_lo
elif rmf is not None:
if (rmf.e_min is None) or (rmf.e_max is None):
raise DataErr('noenergybins', 'RMF')
elo = rmf.e_min
ehi = rmf.e_max
elif arf is not None:
elo = arf.energ_lo
ehi = arf.energ_hi
else:
elo = self.channel - 0.5
ehi = self.channel + 0.5
if self.units == 'channel':
elo = self.channel - 0.5
ehi = self.channel + 0.5
# If the data are grouped, then we should group up
# the energy bins as well. E.g., if group 1 is
# channels 1-5, then the energy boundaries for the
# *group* should be elo[0], ehi[4].
if self.grouped and group:
elo = self.apply_grouping(elo, self._min)
ehi = self.apply_grouping(ehi, self._max)
return (elo, ehi)
def get_indep(self, filter=True):
if filter:
return (self.get_noticed_channels(),)
return (self.channel,)
def _get_indep(self, filter=False):
if (self.bin_lo is not None) and (self.bin_hi is not None):
elo = self.bin_lo
ehi = self.bin_hi
if (elo[0] > elo[-1]) and (ehi[0] > ehi[-1]):
if self.units == 'wavelength':
return (elo, ehi)
elo = self._hc / self.bin_hi
ehi = self._hc / self.bin_lo
else:
energylist = []
for id in self.response_ids:
arf, rmf = self.get_response(id)
lo = None
hi = None
if rmf is not None:
lo = rmf.energ_lo
hi = rmf.energ_hi
if filter:
lo, hi = rmf.get_indep()
elif arf is not None:
lo = arf.energ_lo
hi = arf.energ_hi
if filter:
lo, hi = arf.get_indep()
energylist.append((lo, hi))
if len(energylist) > 1:
elo, ehi, lookuptable = compile_energy_grid(energylist)
elif (not energylist or
(len(energylist) == 1 and
numpy.equal(energylist[0], None).any())):
raise DataErr('noenergybins', 'Response')
else:
elo, ehi = energylist[0]
lo, hi = elo, ehi
if self.units == 'wavelength':
lo = self._hc / ehi
hi = self._hc / elo
return (lo, hi)
def _channel_to_group(self, val):
"""Convert channel number to group number.
For ungrouped data channel and group numbering are the
same.
"""
if not self.grouped:
return val
# The edge channels of each group.
#
lo = self.apply_grouping(self.channel, self._min)
hi = self.apply_grouping(self.channel, self._max)
val = numpy.asarray(val).astype(numpy.int_)
res = []
for v in val.flat:
# could follow _energy_to_channel but for now go
# with something simple
if v < self.channel[0]:
ans = self.channel[0]
elif v > self.channel[-1]:
ans = self.channel[-1]
else:
idx, = numpy.where((v >= lo) & (v <= hi))
ans = idx[0] + 1
res.append(ans)
res = numpy.asarray(res, SherpaFloat)
if val.shape == ():
return res[0]
return res
def _group_to_channel(self, val, group=True, response_id=None):
"""Convert group number to channel number.
For ungrouped data channel and group numbering are the
same. The mid-point of each group is used (rounded down
if not an integer).
"""
if not self.grouped or not group:
return val
# The middle channel of each group.
#
mid = self.apply_grouping(self.channel, self._middle)
# Convert to an integer (this keeps the channel within
# the group).
#
mid = numpy.floor(mid)
val = numpy.asarray(val).astype(numpy.int_) - 1
try:
return mid[val]
except IndexError:
raise DataErr('invalid group number: {}'.format(val))
def _channel_to_energy(self, val, group=True, response_id=None):
elo, ehi = self._get_ebins(response_id=response_id, group=group)
val = numpy.asarray(val).astype(numpy.int_) - 1
try:
return (elo[val] + ehi[val]) / 2.0
except IndexError:
raise DataErr('invalidchannel', val)
def _energy_to_channel(self, val):
elo, ehi = self._get_ebins()
# special case handling no noticed data (e.g. ignore_bad
# removes all bins); assume if elo is empty then so is ehi.
#
if len(elo) == 0:
raise DataErr('notmask')
val = numpy.asarray(val)
res = []
for v in val.flat:
if tuple(numpy.flatnonzero(elo <= v)) == ():
if elo[0] > elo[-1] and ehi[0] > ehi[-1]:
res.append(SherpaFloat(len(elo)))
else:
res.append(SherpaFloat(1))
elif tuple(numpy.flatnonzero(ehi > v)) == ():
if elo[0] > elo[-1] and ehi[0] > ehi[-1]:
res.append(SherpaFloat(1))
else:
res.append(SherpaFloat(len(ehi)))
elif tuple(numpy.flatnonzero((elo <= v) & (ehi > v)) + 1) != ():
res.append(SherpaFloat(
numpy.flatnonzero((elo <= v) & (ehi > v)) + 1))
elif (elo <= v).argmin() == (ehi > v).argmax():
res.append(SherpaFloat((elo <= v).argmin()))
else:
raise DataErr("energytochannel", v)
if val.shape == ():
return res[0]
return numpy.asarray(res, SherpaFloat)
_hc = 12.39841874 # nist.gov in [keV-Angstrom]
def _channel_to_wavelength(self, val, group=True, response_id=None):
tiny = numpy.finfo(numpy.float32).tiny
vals = numpy.asarray(self._channel_to_energy(val, group, response_id))
if vals.shape == ():
if vals == 0.0:
vals = tiny
else:
vals[vals == 0.0] = tiny
vals = self._hc / vals
return vals
def _wavelength_to_channel(self, val):
tiny = numpy.finfo(numpy.float32).tiny
vals = numpy.asarray(val)
if vals.shape == ():
if vals == 0.0:
vals = tiny
else:
vals[vals == 0.0] = tiny
vals = self._hc / vals
return self._energy_to_channel(vals)
default_background_id = 1
"""The identifier for the background component when not set."""
def _fix_background_id(self, id):
if id is None:
id = self.default_background_id
return id
def get_background(self, id=None):
"""Return the background component.
Parameters
----------
id : int or str, optional
The identifier of the background component. If it is None
then the default background identifier is used.
Returns
-------
bkg : sherpa.astro.data.DataPHA instance or None
The background dataset. If there is no component then None
is returned.
See Also
--------
delete_background, set_background
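Examples
--------
An illustrative sketch; the second call assumes that a
component with identifier 2 has been set:
>>> bkg = pha.get_background()
>>> bkg2 = pha.get_background(2)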
"""
id = self._fix_background_id(id)
return self._backgrounds.get(id)
def set_background(self, bkg, id=None):
"""Add or replace a background component.
If the background has no grouping of quality arrays then they
are copied from the source region. If the background has no
response information (ARF or RMF) then the response is copied
from the source region.
Parameters
----------
bkg : sherpa.astro.data.DataPHA instance
The background dataset to add. This object may be changed
by this method.
id : int or str, optional
The identifier of the background component. If it is None
then the default background identifier is used.
See Also
--------
delete_background, get_background
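Examples
--------
A minimal sketch; ``bkg`` and ``bkg2`` are assumed to be
`DataPHA` instances holding the background spectra:
>>> pha.set_background(bkg)
>>> pha.set_background(bkg2, id=2)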
"""
id = self._fix_background_id(id)
self._backgrounds[id] = bkg
ids = self.background_ids[:]
if id not in ids:
ids.append(id)
self.background_ids = ids
# Copy over data from the source to the background
# if it is not present in the background:
# - grouping and quality
# - response information (ONLY THE FIRST TERM)
#
# The rate and plot_fac values are always copied; the units
# are only copied when a response is present.
#
if bkg.grouping is None:
bkg.grouping = self.grouping
bkg.grouped = bkg.grouping is not None
if bkg.quality is None:
bkg.quality = self.quality
if bkg.get_response() == (None, None):
bkg.set_response(*self.get_response())
if bkg.get_response() != (None, None):
bkg.units = self.units
bkg.rate = self.rate
bkg.plot_fac = self.plot_fac
def delete_background(self, id=None):
"""Remove the background component.
If the background component does not exist then the method
does nothing.
Parameters
----------
id : int or str, optional
The identifier of the background component. If it is None
then the default background identifier is used.
See Also
--------
set_background
Notes
-----
If this call removes the last of the background components
then the subtracted flag is cleared (if set).
"""
id = self._fix_background_id(id)
self._backgrounds.pop(id, None)
if len(self._backgrounds) == 0:
self._subtracted = False
ids = self.background_ids[:]
if id in ids:
ids.remove(id)
self.background_ids = ids
def get_background_scale(self, bkg_id=1, units='counts',
group=True, filter=False):
"""Return the correction factor for the background dataset.
.. versionchanged:: 4.12.2
The bkg_id, units, group, and filter parameters have been
added and the routine no-longer calculates the average
scaling for all the background components but just for the
given component.
Parameters
----------
bkg_id : int or str, optional
The background component to use (the default is 1).
units : {'counts', 'rate'}, optional
The correction is applied to a model defined as counts, the
default, or a rate. The latter should be used when
calculating the correction factor for adding the background
data to the source aperture.
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
scale : None, number, or NumPy array
The scaling factor to correct the background data onto the
source data set. If bkg_id is not valid then None is
returned.
Notes
-----
The correction factor when units is 'counts' is::
scale_exposure * scale_backscal * scale_areascal / nbkg
where nbkg is the number of background components and
scale_x is the source value divided by the background
value for the field x.
When units is 'rate' the correction is:
scale_backscal / nbkg
and it is currently uncertain whether it should include the
AREASCAL scaling.
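Examples
--------
An illustrative sketch, assuming a background component with
the default identifier has been set:
>>> scale = pha.get_background_scale()
>>> rscale = pha.get_background_scale(units='rate', group=False)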
"""
if units not in ['counts', 'rate']:
raise ValueError("Invalid units argument: {}".format(units))
if bkg_id not in self.background_ids:
return None
nbkg = len(self.background_ids)
def correct(obj):
"""Correction factor for the object"""
ans = 1.0
# Should we set 0 values to 1 at this stage?
#
if obj.backscal is not None:
ans *= self._check_scale(obj.backscal, group=False)
if obj.areascal is not None and units == 'counts':
ans *= self._check_scale(obj.areascal, group=False)
if obj.exposure is not None and units == 'counts':
ans *= self._check_scale(obj.exposure, group=False)
return ans
src = correct(self)
bkg = correct(self.get_background(bkg_id))
scale = src / bkg / nbkg
return self._check_scale(scale, group=group, filter=filter)
def _check_scale(self, scale, group=True, filter=False):
"""Ensure the scale value is positive and filtered/grouped.
Parameters
----------
scale : number or numpy array
The scale factor.
group : bool, optional
Is any grouping applied to the data? This is only
relevant for an array.
filter : bool, optional
Is any filter applied? This is only checked if group
is True.
Returns
-------
scale : number or numpy array
Negative values are replaced by 1.0.
"""
if numpy.isscalar(scale) and scale <= 0.0:
scale = 1.0
elif numpy.iterable(scale):
scale = numpy.asarray(scale, dtype=SherpaFloat)
if group:
if filter:
scale = self.apply_filter(scale, self._middle)
else:
scale = self.apply_grouping(scale, self._middle)
scale[scale <= 0.0] = 1.0
return scale
def get_backscal(self, group=True, filter=False):
"""Return the background scaling of the PHA data set.
Return the BACKSCAL setting [BSCAL]_ for the PHA data set.
Parameters
----------
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
backscal : number or ndarray
The BACKSCAL value, which can be a scalar or a 1D array.
See Also
--------
get_areascal, get_background_scale
Notes
-----
The BACKSCAL value can be defined as the ratio of the area of
the source (or background) extraction region in image pixels
to the total number of image pixels. The fact that there is no
ironclad definition for this quantity does not matter so long
as the value for a source dataset and its associated
background dataset are defined in the same manner, because
only the ratio of source and background BACKSCAL values is
used. It can be a scalar or an array.
References
----------
.. [BSCAL] "The OGIP Spectral File Format", Arnaud, K. & George, I.
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
>>> pha.get_backscal()
7.8504301607718007e-06
"""
backscal = self.backscal
if backscal is not None:
backscal = self._check_scale(backscal, group, filter)
return backscal
def get_areascal(self, group=True, filter=False):
"""Return the fractional area factor of the PHA data set.
Return the AREASCAL setting [ASCAL]_ for the PHA data set.
Parameters
----------
group : bool, optional
Should the values be grouped to match the data?
filter : bool, optional
Should the values be filtered to match the data?
Returns
-------
areascal : number or ndarray
The AREASCAL value, which can be a scalar or a 1D array.
See Also
--------
get_backscal, get_background_scale
Notes
-----
The fractional area scale is normally set to 1, with the ARF used
to scale the model.
References
----------
.. [ASCAL] "The OGIP Spectral File Format", Arnaud, K. & George, I.
http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/ogip_92_007.html
Examples
--------
>>> pha.get_areascal()
1.0
"""
areascal = self.areascal
if areascal is not None:
areascal = self._check_scale(areascal, group, filter)
return areascal
def apply_filter(self, data, groupfunc=numpy.sum):
"""
Filter the array data, first passing it through apply_grouping()
(using groupfunc) and then applying the general filters
"""
if data is None:
return data
if len(data) != len(self.counts):
counts = numpy.zeros(len(self.counts), dtype=SherpaFloat)
mask = self.get_mask()
if mask is not None:
counts[mask] = numpy.asarray(data, dtype=SherpaFloat)
data = counts
# else:
# raise DataErr('mismatch', "filter", "data array")
return super().apply_filter(self.apply_grouping(data, groupfunc))
def apply_grouping(self, data, groupfunc=numpy.sum):
"""
Apply the data set's grouping scheme to the array data,
combining the grouped data points with groupfunc, and return
the grouped array. If the data set has no associated grouping
scheme or the data are ungrouped, data is returned unaltered.
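As an illustrative sketch, the grouped counts (summed within
each group, which is the default behaviour) can be obtained
with:
>>> gcounts = pha.apply_grouping(pha.counts)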
"""
if data is None or not self.grouped:
return data
groups = self.grouping
filter = self.quality_filter
if filter is None:
return do_group(data, groups, groupfunc.__name__)
if len(data) != len(filter) or len(groups) != len(filter):
raise DataErr('mismatch', "quality filter", "data array")
filtered_data = numpy.asarray(data)[filter]
groups = numpy.asarray(groups)[filter]
grouped_data = do_group(filtered_data, groups, groupfunc.__name__)
if data is self.channel and groupfunc is self._make_groups:
return numpy.arange(1, len(grouped_data) + 1, dtype=int)
return grouped_data
def ignore_bad(self):
"""Exclude channels marked as bad.
Ignore any bin in the PHA data set which has a quality value
that is larger than zero.
Raises
------
sherpa.utils.err.DataErr
If the data set has no quality array.
See Also
--------
ignore : Exclude data from the fit.
notice : Include data in the fit.
Notes
-----
Bins with a non-zero quality setting are not automatically
excluded when a data set is created.
If the data set has been grouped, then calling `ignore_bad`
will remove any filter applied to the data set. If this
happens a warning message will be displayed.
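Examples
--------
An illustrative sketch (it requires that the quality array has
been set):
>>> pha.ignore_bad()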
"""
if self.quality is None:
raise DataErr("noquality", self.name)
qual_flags = ~numpy.asarray(self.quality, bool)
if self.grouped and (self.mask is not True):
self.notice()
warning('filtering grouped data with quality flags,' +
' previous filters deleted')
elif not self.grouped:
# if ungrouped, create/combine with self.mask
if self.mask is not True:
self.mask = self.mask & qual_flags
return
else:
self.mask = qual_flags
return
# self.quality_filter used for pre-grouping filter
self.quality_filter = qual_flags
def _dynamic_group(self, group_func, *args, **kwargs):
keys = list(kwargs.keys())[:]
for key in keys:
if kwargs[key] is None:
kwargs.pop(key)
old_filter = self.get_filter(group=False)
do_notice = numpy.iterable(self.mask)
self.grouping, self.quality = group_func(*args, **kwargs)
self.group()
self._original_groups = False
if do_notice:
# self.group() above has cleared the filter if applicable
# No, that just sets a flag. So manually clear filter
# here
self.ignore()
for vals in parse_expr(old_filter):
self.notice(*vals)
# warning('grouping flags have changed, noticing all bins')
# Have to move this check here; as formerly written, reference
# to pygroup functions happened *before* checking groupstatus,
# in _dynamic_group. So we did not return the intended error
# message; rather, a NameError was raised stating that pygroup
# did not exist in global scope (not too clear to the user).
#
# The groupstatus check thus has to be done in *each* of the following
# group functions.
# # Dynamic grouping functions now automatically impose the
# # same grouping conditions on *all* associated background data sets.
# # CIAO 4.5 bug fix, 05/01/2012
def group_bins(self, num, tabStops=None):
"""Group into a fixed number of bins.
Combine the data so that there are `num` equal-width bins (or
groups). The binning scheme is applied to all the channels,
but any existing filter - created by the `ignore` or `notice`
set of functions - is re-applied after the data has been
grouped.
Parameters
----------
num : int
The number of bins in the grouped data set. Each bin
will contain the same number of channels.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
Since the bin width is an integer number of channels, it is
likely that some channels will be "left over". This is even
more likely when the `tabStops` parameter is set. If this
happens, a warning message will be displayed to the screen and
the quality value for these channels will be set to 2.
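Examples
--------
An illustrative sketch that groups the full channel range into
256 equal-width groups:
>>> pha.group_bins(256)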
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpNumBins, len(self.channel), num,
tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_bins(num, tabStops=tabStops)
def group_width(self, val, tabStops=None):
"""Group into a fixed bin width.
Combine the data so that each bin contains `val` channels.
The binning scheme is applied to all the channels, but any
existing filter - created by the `ignore` or `notice` set of
functions - is re-applied after the data has been grouped.
Parameters
----------
val : int
The number of channels to combine into a group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
Notes
-----
Unless the requested bin width is a factor of the number of
channels (and no `tabStops` parameter is given), then some
channels will be "left over". If this happens, a warning
message will be displayed to the screen and the quality value
for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpBinWidth, len(self.channel), val,
tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_width(val, tabStops=tabStops)
def group_counts(self, num, maxLength=None, tabStops=None):
"""Group into a minimum number of counts per bin.
Combine the data so that each bin contains `num` or more
counts. The binning scheme is applied to all the channels, but
any existing filter - created by the `ignore` or `notice` set
of functions - is re-applied after the data has been grouped.
The background is *not* included in this calculation; the
calculation is done on the raw data even if `subtract` has
been called on this data set.
Parameters
----------
num : int
The minimum number of counts required in each group.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
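Examples
--------
An illustrative sketch requiring at least 20 counts per group,
optionally limiting each group to no more than 50 channels:
>>> pha.group_counts(20)
>>> pha.group_counts(20, maxLength=50)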
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpNumCounts, self.counts, num,
maxLength=maxLength, tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_counts(num, maxLength=maxLength, tabStops=tabStops)
# DOC-TODO: see discussion in astro.ui.utils regarding errorCol
def group_snr(self, snr, maxLength=None, tabStops=None, errorCol=None):
"""Group into a minimum signal-to-noise ratio.
Combine the data so that each bin has a signal-to-noise ratio
which exceeds `snr`. The binning scheme is applied to all the
channels, but any existing filter - created by the `ignore` or
`notice` set of functions - is re-applied after the data has
been grouped. The background is *not* included in this
calculation; the calculation is done on the raw data even if
`subtract` has been called on this data set.
Parameters
----------
snr : number
The minimum signal-to-noise ratio that must be exceeded
to form a group of channels.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
errorCol : array of num, optional
If set, the error to use for each channel when calculating
the signal-to-noise ratio. If not given then Poisson
statistics is assumed. A warning is displayed for each
zero-valued error estimate.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
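Examples
--------
An illustrative sketch requiring a signal-to-noise ratio of at
least 5 in each group:
>>> pha.group_snr(5)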
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpSnr, self.counts, snr,
maxLength=maxLength, tabStops=tabStops,
errorCol=errorCol)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_snr(snr, maxLength=maxLength, tabStops=tabStops,
errorCol=errorCol)
def group_adapt(self, minimum, maxLength=None, tabStops=None):
"""Adaptively group to a minimum number of counts.
Combine the data so that each bin contains `minimum` or more
counts. The difference to `group_counts` is that this
algorithm starts with the bins with the largest signal, in
order to avoid over-grouping bright features, rather than at
the first channel of the data. The adaptive nature means that
low-count regions between bright features may not end up in
groups with the minimum number of counts. The binning scheme
is applied to all the channels, but any existing filter -
created by the `ignore` or `notice` set of functions - is
re-applied after the data has been grouped.
Parameters
----------
minimum : int
The minimum number of counts required in each group.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
See Also
--------
group_adapt_snr : Adaptively group to a minimum signal-to-noise ratio.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpAdaptive, self.counts, minimum,
maxLength=maxLength, tabStops=tabStops)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_adapt(minimum, maxLength=maxLength,
tabStops=tabStops)
# DOC-TODO: see discussion in astro.ui.utils regarding errorCol
def group_adapt_snr(self, minimum, maxLength=None, tabStops=None,
errorCol=None):
"""Adaptively group to a minimum signal-to-noise ratio.
Combine the data so that each bin has a signal-to-noise ratio
which exceeds `minimum`. The difference to `group_snr` is that
this algorithm starts with the bins with the largest signal,
in order to avoid over-grouping bright features, rather than
at the first channel of the data. The adaptive nature means
that low-count regions between bright features may not end up
in groups with the minimum number of counts. The binning
scheme is applied to all the channels, but any existing filter
- created by the `ignore` or `notice` set of functions - is
re-applied after the data has been grouped.
Parameters
----------
minimum : number
The minimum signal-to-noise ratio that must be exceeded
to form a group of channels.
maxLength : int, optional
The maximum number of channels that can be combined into a
single group.
tabStops : array of int or bool, optional
If set, indicate one or more ranges of channels that should
not be included in the grouped output. The array should
match the number of channels in the data set and non-zero or
`True` means that the channel should be ignored from the
grouping (use 0 or `False` otherwise).
errorCol : array of num, optional
If set, the error to use for each channel when calculating
the signal-to-noise ratio. If not given then Poisson
statistics is assumed. A warning is displayed for each
zero-valued error estimate.
See Also
--------
group_adapt : Adaptively group to a minimum number of counts.
group_bins : Group into a fixed number of bins.
group_counts : Group into a minimum number of counts per bin.
group_snr : Group into a minimum signal-to-noise ratio.
group_width : Group into a fixed bin width.
Notes
-----
If channels can not be placed into a "valid" group, then a
warning message will be displayed to the screen and the
quality value for these channels will be set to 2.
"""
if not groupstatus:
raise ImportErr('importfailed', 'group', 'dynamic grouping')
self._dynamic_group(pygroup.grpAdaptiveSnr, self.counts, minimum,
maxLength=maxLength, tabStops=tabStops,
errorCol=errorCol)
for bkg_id in self.background_ids:
bkg = self.get_background(bkg_id)
bkg.group_adapt_snr(minimum, maxLength=maxLength,
tabStops=tabStops, errorCol=errorCol)
def eval_model(self, modelfunc):
return modelfunc(*self.get_indep(filter=False))
def eval_model_to_fit(self, modelfunc):
return self.apply_filter(modelfunc(*self.get_indep(filter=True)))
def sum_background_data(self,
get_bdata_func=(lambda key, bkg: bkg.counts)):
"""Sum up data, applying the background correction value.
Parameters
----------
get_bdata_func : function, optional
What data should be used for each background dataset. The
function takes the background identifier and background
DataPHA object and returns the data to use. The default is
to use the counts array of the background dataset.
Returns
-------
value : scalar or NumPy array
The sum of the data, including any area, background, and
exposure-time corrections.
Notes
-----
For each associated background, the data is retrieved (via
the get_bdata_func parameter), and then
- divided by its BACKSCAL value (if set)
- divided by its AREASCAL value (if set)
- divided by its exposure time (if set)
The individual background components are then summed together,
and then multiplied by the source BACKSCAL (if set),
multiplied by the source AREASCAL (if set), and multiplied
by the source exposure time (if set). The final step is
to divide by the number of background files used.
Example
-------
Calculate the background counts, per channel, scaled to match
the source:
>>> bcounts = src.sum_background_data()
Calculate the scaling factor that you need to multiply the
background data to match the source data. In this case the
background data has been replaced by the value 1 (rather than
the per-channel values used with the default argument):
>>> bscale = src.sum_background_data(lambda k, d: 1)
"""
bdata_list = []
for key in self.background_ids:
bkg = self.get_background(key)
bdata = get_bdata_func(key, bkg)
backscal = bkg.backscal
if backscal is not None:
backscal = self._check_scale(backscal, group=False)
bdata = bdata / backscal
areascal = bkg.get_areascal(group=False)
if areascal is not None:
bdata = bdata / areascal
if bkg.exposure is not None:
bdata = bdata / bkg.exposure
bdata_list.append(bdata)
nbkg = len(bdata_list)
if nbkg == 0:
# do not have a good id to use for the error message
raise DataErr('nobkg', self.name)
if nbkg == 1:
bkgsum = bdata_list[0]
else:
bkgsum = sum(bdata_list)
backscal = self.backscal
if backscal is not None:
backscal = self._check_scale(backscal, group=False)
bkgsum = backscal * bkgsum
areascal = self.areascal
if areascal is not None:
areascal = self._check_scale(areascal, group=False)
bkgsum = areascal * bkgsum
if self.exposure is not None:
bkgsum = self.exposure * bkgsum
return bkgsum / SherpaFloat(nbkg)
def get_dep(self, filter=False):
# FIXME: Aneta says we need to group *before* subtracting, but that
# won't work (I think) when backscal is an array
# if not self.subtracted:
# return self.counts
# return self.counts - self.sum_background_data()
dep = self.counts
filter = bool_cast(filter)
# The area scaling is not applied to the data, since it
# should be being applied to the model via the *PHA
# instrument model. Note however that the background
# contribution does include the source AREASCAL value
# (in the same way that the source BACKSCAL value
# is used).
#
if self.subtracted:
bkg = self.sum_background_data()
if len(dep) != len(bkg):
raise DataErr("subtractlength")
dep = dep - bkg
if filter:
dep = self.apply_filter(dep)
return dep
def set_dep(self, val):
# QUS: should this "invert" the areascaling to val
# to get the stored values?
#
# Otherwise, when areascal /= 1
# y1 = d.get_dep()
# d.set_dep(y1)
# y2 = d.get_dep()
# y1 != y2
#
# Or perhaps it removes the areascal value in this case?
# We already have this split in the API when background data
# is available and is subtracted.
#
if numpy.iterable(val):
dep = numpy.asarray(val, SherpaFloat)
else:
val = SherpaFloat(val)
dep = numpy.array([val] * len(self.get_indep()[0]))
setattr(self, 'counts', dep)
def get_staterror(self, filter=False, staterrfunc=None):
"""Return the statistical error.
The staterror column is used if defined, otherwise the
function provided by the staterrfunc argument is used to
calculate the values.
Parameters
----------
filter : bool, optional
Should the channel filter be applied to the return values?
staterrfunc : function reference, optional
The function to use to calculate the errors if the
staterror field is None. The function takes one argument,
the counts (after grouping and filtering), and returns an
array of values which represents the one-sigma error for each
element of the input array. This argument is designed to
work with implementations of the sherpa.stats.Stat.calc_staterror
method.
Returns
-------
staterror : array or None
The statistical error. It will be grouped and,
if filter=True, filtered. The contribution from any
associated background components will be included if
the background-subtraction flag is set.
Notes
-----
There is no scaling by the AREASCAL setting, but background
values are scaled by their AREASCAL settings. It is not at all
obvious that the current code is doing the right thing, or that
this is the right approach.
Examples
--------
>>> dy = dset.get_staterror()
Ensure that there is no pre-defined statistical-error column
and then use the Chi2DataVar statistic to calculate the errors:
>>> stat = sherpa.stats.Chi2DataVar()
>>> dset.set_staterror(None)
>>> dy = dset.get_staterror(staterrfunc=stat.calc_staterror)
"""
staterr = self.staterror
filter = bool_cast(filter)
if filter:
staterr = self.apply_filter(staterr, self._sum_sq)
else:
staterr = self.apply_grouping(staterr, self._sum_sq)
# The source AREASCAL is not applied here, but the
# background term is.
#
if (staterr is None) and (staterrfunc is not None):
cnts = self.counts
if filter:
cnts = self.apply_filter(cnts)
else:
cnts = self.apply_grouping(cnts)
staterr = staterrfunc(cnts)
# Need to apply the area scaling to the calculated
# errors. Grouping and filtering complicate this; is
# _middle the best choice here?
#
"""
area = self.areascal
if staterr is not None and area is not None:
if numpy.isscalar(area):
area = numpy.zeros(self.channel.size) + area
# TODO: replace with _check_scale?
if filter:
area = self.apply_filter(area, self._middle)
else:
area = self.apply_grouping(area, self._middle)
staterr = staterr / area
"""
if (staterr is not None) and self.subtracted:
bkg_staterr_list = []
# for bkg in self._backgrounds.values():
for key in self.background_ids:
bkg = self.get_background(key)
berr = bkg.staterror
if filter:
berr = self.apply_filter(berr, self._sum_sq)
else:
berr = self.apply_grouping(berr, self._sum_sq)
if (berr is None) and (staterrfunc is not None):
bkg_cnts = bkg.counts
if filter:
bkg_cnts = self.apply_filter(bkg_cnts)
else:
bkg_cnts = self.apply_grouping(bkg_cnts)
# TODO: shouldn't the following logic be somewhere
# else more general?
if hasattr(staterrfunc, '__name__') and \
staterrfunc.__name__ == 'calc_chi2datavar_errors' and \
0.0 in bkg_cnts:
mask = (numpy.asarray(bkg_cnts) != 0.0)
berr = numpy.zeros(len(bkg_cnts))
berr[mask] = staterrfunc(bkg_cnts[mask])
else:
berr = staterrfunc(bkg_cnts)
# FIXME: handle this
# assert (berr is not None)
# This case appears when the source dataset has an error
# column and at least one of the background(s) do not.
# Because staterr is not None but staterrfunc is None, I think
# we should return None. This way the user knows to call with
# staterrfunc next time.
if berr is None:
return None
bksl = bkg.backscal
if bksl is not None:
bksl = self._check_scale(bksl, filter=filter)
berr = berr / bksl
# Need to apply filter/grouping of the source dataset
# to the background areascal, so can not just say
# area = bkg.get_areascal(filter=filter)
#
area = bkg.areascal
if area is not None:
area = self._check_scale(area, filter=filter)
berr = berr / area
if bkg.exposure is not None:
berr = berr / bkg.exposure
berr = berr * berr
bkg_staterr_list.append(berr)
nbkg = len(bkg_staterr_list)
assert (nbkg > 0)
if nbkg == 1:
bkgsum = bkg_staterr_list[0]
else:
bkgsum = sum(bkg_staterr_list)
bscal = self.backscal
if bscal is not None:
bscal = self._check_scale(bscal, filter=filter)
bkgsum = (bscal * bscal) * bkgsum
# Correct the background counts by the source AREASCAL
# setting. Is this correct?
ascal = self.areascal
if ascal is not None:
ascal = self._check_scale(ascal, filter=filter)
bkgsum = (ascal * ascal) * bkgsum
if self.exposure is not None:
bkgsum = (self.exposure * self.exposure) * bkgsum
nbkg = SherpaFloat(nbkg)
if staterr is not None:
staterr = staterr * staterr + bkgsum / (nbkg * nbkg)
staterr = numpy.sqrt(staterr)
return staterr
def get_syserror(self, filter=False):
"""Return any systematic error.
Parameters
----------
filter : bool, optional
Should the channel filter be applied to the return values?
Returns
-------
syserror : array or None
The systematic error, if set. It will be grouped and,
if filter=True, filtered.
Notes
-----
There is no scaling by the AREASCAL setting.
"""
syserr = self.syserror
filter = bool_cast(filter)
if filter:
syserr = self.apply_filter(syserr, self._sum_sq)
else:
syserr = self.apply_grouping(syserr, self._sum_sq)
return syserr
def get_x(self, filter=False, response_id=None):
# We want the full channel grid with no grouping.
#
return self._from_channel(self.channel, group=False, response_id=response_id)
def get_xlabel(self):
xlabel = self.units.capitalize()
if self.units == 'energy':
xlabel += ' (keV)'
elif self.units == 'wavelength':
xlabel += ' (Angstrom)'
# elif self.units == 'channel' and self.grouped:
# xlabel = 'Group Number'
return xlabel
def _set_initial_quantity(self):
arf, rmf = self.get_response()
# Change the analysis setting if the ARF grid has equal or
# higher resolution, to allow for high-resolution model evaluation.
if arf is not None and rmf is None:
if len(arf.energ_lo) == len(self.channel):
self.units = 'energy'
# Only change analysis if RMF matches the parent PHA dataset.
if rmf is not None:
if len(self.channel) != len(rmf.e_min):
raise DataErr("incompatibleresp", rmf.name, self.name)
self.units = 'energy'
def _fix_y_units(self, val, filter=False, response_id=None):
"""Rescale the data to match the 'y' axis."""
if val is None:
return val
filter = bool_cast(filter)
# make a copy of data for units manipulation
val = numpy.array(val, dtype=SherpaFloat)
if self.rate and self.exposure is not None:
val /= self.exposure
# TODO: It is not clear if the areascal should always be applied,
# or only if self.rate is set (since it is being considered
# a "correction" to the exposure time, but don't we want
# to apply it in plots even if the Y axis is in counts?)
#
if self.areascal is not None:
areascal = self._check_scale(self.areascal, filter=filter)
val /= areascal
if self.grouped or self.rate:
if self.units != 'channel':
elo, ehi = self._get_ebins(response_id, group=False)
else:
elo, ehi = (self.channel, self.channel + 1.)
if filter:
# If we apply a filter, make sure that
# ebins are ungrouped before applying
# the filter.
elo = self.apply_filter(elo, self._min)
ehi = self.apply_filter(ehi, self._max)
elif self.grouped:
elo = self.apply_grouping(elo, self._min)
ehi = self.apply_grouping(ehi, self._max)
if self.units == 'energy':
ebin = ehi - elo
elif self.units == 'wavelength':
ebin = self._hc / elo - self._hc / ehi
elif self.units == 'channel':
ebin = ehi - elo
else:
raise DataErr("bad", "quantity", self.units)
val /= numpy.abs(ebin)
# The final step is to multiply by the X axis self.plot_fac
# times.
if self.plot_fac <= 0:
return val
scale = self.apply_filter(self.get_x(response_id=response_id),
self._middle)
for ii in range(self.plot_fac):
val *= scale
return val
def get_y(self, filter=False, yfunc=None, response_id=None, use_evaluation_space=False):
vallist = Data.get_y(self, yfunc=yfunc)
filter = bool_cast(filter)
if not isinstance(vallist, tuple):
vallist = (vallist,)
newvallist = []
for val in vallist:
if filter:
val = self.apply_filter(val)
else:
val = self.apply_grouping(val)
val = self._fix_y_units(val, filter, response_id)
newvallist.append(val)
if len(vallist) == 1:
vallist = newvallist[0]
else:
vallist = tuple(newvallist)
return vallist
def get_yerr(self, filter=False, staterrfunc=None, response_id=None):
filter = bool_cast(filter)
err = self.get_error(filter, staterrfunc)
return self._fix_y_units(err, filter, response_id)
def get_xerr(self, filter=False, response_id=None):
elo, ehi = self._get_ebins(response_id=response_id)
filter = bool_cast(filter)
if filter:
# If we apply a filter, make sure that
# ebins are ungrouped before applying
# the filter.
elo, ehi = self._get_ebins(response_id, group=False)
elo = self.apply_filter(elo, self._min)
ehi = self.apply_filter(ehi, self._max)
return ehi - elo
def get_ylabel(self):
ylabel = 'Counts'
if self.rate and self.exposure:
ylabel += '/sec'
if self.rate or self.grouped:
if self.units == 'energy':
ylabel += '/keV'
elif self.units == 'wavelength':
ylabel += '/Angstrom'
elif self.units == 'channel':
ylabel += '/channel'
if self.plot_fac:
from sherpa.plot import backend
latex = backend.get_latex_for_string(
'^{}'.format(self.plot_fac))
ylabel += ' X {}{}'.format(self.units.capitalize(), latex)
return ylabel
# Dummy function to tell apply_grouping to construct
# an array of groups.
@staticmethod
def _make_groups(array):
pass
@staticmethod
def _middle(array):
array = numpy.asarray(array)
return (array.min() + array.max()) / 2.0
@staticmethod
def _min(array):
array = numpy.asarray(array)
return array.min()
@staticmethod
def _max(array):
array = numpy.asarray(array)
return array.max()
@staticmethod
def _sum_sq(array):
return numpy.sqrt(numpy.sum(array * array))
def get_noticed_channels(self):
"""Return the noticed channels.
Returns
-------
channels : ndarray
The noticed channels (this is independent of the
analysis setting).
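Examples
--------
An illustrative sketch; the returned values are channel numbers
even when the analysis setting is energy or wavelength:
>>> pha.notice(0.5, 7)
>>> chans = pha.get_noticed_channels()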
"""
chans = self.channel
mask = self.get_mask()
if mask is None:
return chans
# This is added to address issue #361
#
# If there is a quality filter then the mask may be
# smaller than the chans array. It is not clear if this
# is the best location for this. If it is, then are there
# other locations where this logic is needed?
#
if self.quality_filter is not None and \
self.quality_filter.size != mask.size:
chans = chans[self.quality_filter]
return chans[mask]
def get_mask(self):
"""Returns the (ungrouped) mask.
Returns
-------
mask : ndarray or None
The mask, in channels, or None.
"""
groups = self.grouping
if self.mask is False:
return None
if self.mask is True or not self.grouped:
if self.quality_filter is not None:
return self.quality_filter
elif numpy.iterable(self.mask):
return self.mask
return None
if self.quality_filter is not None:
groups = groups[self.quality_filter]
return expand_grouped_mask(self.mask, groups)
def get_noticed_expr(self):
chans = self.get_noticed_channels()
if self.mask is False or len(chans) == 0:
return 'No noticed channels'
return create_expr(chans, format='%i')
def get_filter(self, group=True, format='%.12f', delim=':'):
"""Return the data filter as a string.
For grouped data, or when the analysis setting is not
channel, filter values refer to the center of the
channel or group.
Parameters
----------
group : bool, optional
Should the filter reflect the grouped data?
format : str, optional
The formatting of the numeric values (this is
ignored for channel units, as a format of "%i"
is used).
delim : str, optional
The string used to mark the low-to-high range.
Examples
--------
For a Chandra non-grating dataset which has been grouped:
>>> pha.set_analysis('energy')
>>> pha.notice(0.5, 7)
>>> pha.get_filter(format='%.4f')
'0.5183:8.2198'
>>> pha.set_analysis('channel')
>>> pha.get_filter()
'36:563'
The default is to show the data range for the grouped
dataset, which uses the center of each group. If
the grouping is turned off then the center of the
start and ending channel of each group is used
(and so show a larger data range):
>>> pha.get_filter(format='%.4f')
'0.5183:8.2198'
>>> pha.get_filter(group=False, format='%.4f')
'0.4745:9.8623'
"""
if self.mask is False:
return 'No noticed bins'
if numpy.iterable(self.mask):
mask = self.mask
else:
mask = None
if group:
# grouped noticed channels
#
x = self.apply_filter(self.channel, self._make_groups)
else:
# ungrouped noticed channels
x = self.get_noticed_channels()
# We need the "ungrouped" mask array. Need to check
# issue #361 since get_noticed_channels notes an
# issue that may be relevant here (so far this
# doesn't seem to be the case).
#
mask = self.get_mask()
# Safety check for users. Warn, but continue.
#
if mask is not None and mask.sum() != x.size:
warning("There is a mis-match in the ungrouped mask " +
"and data ({} vs {})".format(mask.sum(), x.size))
# Convert channels to appropriate quantity if necessary
x = self._from_channel(x, group=group)
if mask is None:
mask = numpy.ones(len(x), dtype=bool)
# Ensure the data is in ascending order for create_expr.
#
if self.units == 'wavelength':
x = x[::-1]
mask = mask[::-1]
if self.units == 'channel':
format = '%i'
return create_expr(x, mask=mask, format=format, delim=delim)
def get_filter_expr(self):
return (self.get_filter(format='%.4f', delim='-') +
' ' + self.get_xlabel())
def notice_response(self, notice_resp=True, noticed_chans=None):
notice_resp = bool_cast(notice_resp)
if notice_resp and noticed_chans is None:
noticed_chans = self.get_noticed_channels()
for id in self.response_ids:
arf, rmf = self.get_response(id)
_notice_resp(noticed_chans, arf, rmf)
def notice(self, lo=None, hi=None, ignore=False, bkg_id=None):
# If any background IDs are actually given, then impose
# the filter on those backgrounds *only*, and return. Do
# *not* impose filter on data itself. (Revision possibly
# this should be done in high-level UI?) SMD 10/25/12
filter_background_only = False
if bkg_id is not None:
if not numpy.iterable(bkg_id):
bkg_id = [bkg_id]
filter_background_only = True
else:
bkg_id = self.background_ids
# Automatically impose data's filter on background data sets.
# Units must agree for this to be meaningful, so temporarily
# make data and background units match. SMD 10/25/12
for bid in bkg_id:
bkg = self.get_background(bid)
old_bkg_units = bkg.units
try:
bkg.units = self.units
# If the background is all ignored then bkg.notice will
# do nothing (other than display an INFO message).
#
bkg.notice(lo, hi, ignore)
finally:
bkg.units = old_bkg_units
# If we're only supposed to filter backgrounds, return
if filter_background_only:
return
# Go on if we are also supposed to filter the source data
ignore = bool_cast(ignore)
if lo is None and hi is None:
self.quality_filter = None
self.notice_response(False)
# We do not want a "all data are masked out" error to cause
# this to fail; it should just do nothing (as trying to set
# a noticed range to include masked-out ranges would also
# be ignored).
#
# Convert to "group number" (which, for ungrouped data,
# is just channel number).
#
if lo is not None and type(lo) != str:
try:
lo = self._to_channel(lo)
except DataErr as de:
info("Skipping dataset {}: {}".format(self.name,
de))
return
if hi is not None and type(hi) != str:
try:
hi = self._to_channel(hi)
except DataErr as de:
info("Skipping dataset {}: {}".format(self.name,
de))
return
elo, ehi = self._get_ebins()
if ((self.units == "wavelength" and
elo[0] < elo[-1] and ehi[0] < ehi[-1]) or
(self.units == "energy" and
elo[0] > elo[-1] and ehi[0] > ehi[-1])):
lo, hi = hi, lo
# Don't use the middle of the channel anymore as the
# grouping function. That was just plain dumb.
# So just get back an array of groups 1-N, if grouped
# DATA-NOTE: need to clean this up.
#
groups = self.apply_grouping(self.channel,
self._make_groups)
self._data_space.filter.notice((lo,), (hi,),
(groups,), ignore)
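# An illustrative usage sketch (not part of the original
# documentation): with the analysis setting at 'energy',
#     >>> pha.notice(0.5, 7.0)
#     >>> pha.notice(2.0, 3.0, ignore=True)
# first selects the 0.5-7 keV range and then removes the 2-3 keV
# band from it. Passing bkg_id restricts the filter to the matching
# background component(s) and leaves the source data unchanged.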
def to_guess(self):
elo, ehi = self._get_ebins(group=False)
elo = self.apply_filter(elo, self._min)
ehi = self.apply_filter(ehi, self._max)
if self.units == "wavelength":
lo = self._hc / ehi
hi = self._hc / elo
elo = lo
ehi = hi
cnt = self.get_dep(True)
arf = self.get_specresp(filter=True)
y = cnt / (ehi - elo)
if self.exposure is not None:
y /= self.exposure # photons/keV/sec or photons/Ang/sec
# y = cnt/arf/self.exposure
if arf is not None:
y /= arf # photons/keV/cm^2/sec or photons/Ang/cm^2/sec
return (y, elo, ehi)
def to_fit(self, staterrfunc=None):
return (self.get_dep(True),
self.get_staterror(True, staterrfunc),
self.get_syserror(True))
def to_plot(self, yfunc=None, staterrfunc=None, response_id=None):
return (self.apply_filter(self.get_x(response_id=response_id),
self._middle),
self.get_y(True, yfunc, response_id=response_id),
self.get_yerr(True, staterrfunc, response_id=response_id),
self.get_xerr(True, response_id=response_id),
self.get_xlabel(),
self.get_ylabel())
def group(self):
"Group the data according to the data set's grouping scheme"
self.grouped = True
def ungroup(self):
"Ungroup the data"
self.grouped = False
def subtract(self):
"Subtract the background data"
self.subtracted = True
def unsubtract(self):
"Remove background subtraction"
self.subtracted = False
class DataIMG(Data2D):
"Image data set, including functions for coordinate transformations"
_fields = Data2D._fields + ("sky", "eqpos", "coord", "header")
def _get_coord(self):
return self._coord
def _set_coord(self, val):
coord = str(val).strip().lower()
if coord in ('logical', 'image'):
coord = 'logical'
elif coord in ('physical',):
self._check_physical_transform()
coord = 'physical'
elif coord in ('world', 'wcs'):
self._check_world_transform()
coord = 'world'
else:
raise DataErr('bad', 'coordinates', val)
self._coord = coord
# You should use set_coord rather than changing coord directly,
# otherwise constraints set in set_coord are not run. This is
# probably an error in set_coord (i.e. this logic should be
# moved into _set_coord).
#
coord = property(_get_coord, _set_coord,
doc='Coordinate system of independent axes')
def __init__(self, name, x0, x1, y, shape=None, staterror=None,
syserror=None, sky=None, eqpos=None, coord='logical',
header=None):
self.sky = sky
self.eqpos = eqpos
self.coord = coord
self.header = header
self._region = None
Data2D.__init__(self, name, x0, x1, y, shape, staterror, syserror)
def __str__(self):
# Print the metadata first
old = self._fields
ss = old
try:
self._fields = tuple(filter((lambda x: x != 'header'),
self._fields))
ss = Data.__str__(self)
finally:
self._fields = old
return ss
def _repr_html_(self):
"""Return a HTML (string) representation of the data
"""
return html_img(self)
def __getstate__(self):
state = self.__dict__.copy()
# Function pointers to methods of the class
# (of type 'instancemethod') are NOT picklable
# remove them and restore later with a coord init
# del state['_get_logical']
# del state['_get_physical']
# del state['_get_world']
# PyRegion objects (of type 'extension') are NOT picklable, yet.
# preserve the region string and restore later with constructor
state['_region'] = state['_region'].__str__()
return state
def __setstate__(self, state):
# Populate the function pointers we deleted at pickle time with
# no-ops.
# self.__dict__['_get_logical']=(lambda : None)
# self.__dict__['_get_physical']=(lambda : None)
# self.__dict__['_get_world']=(lambda : None)
if 'header' not in state:
self.header = None
self.__dict__.update(state)
# _set_coord will correctly define the _get_* WCS function pointers.
self._set_coord(state['_coord'])
if regstatus:
self._region = Region(self._region)
else:
            # An ImportErr could be raised rather than displaying a
            # warning, but that would make it harder for the user
# to extract useful data (e.g. in the case of triggering
# this when loading a pickled file).
#
if self._region is not None and self._region != '':
warning("Unable to restore region={} as region module is not avaialable.".format(self._region))
self._region = None
def _check_physical_transform(self):
if self.sky is None:
raise DataErr('nocoord', self.name, 'physical')
def _check_world_transform(self):
if self.eqpos is None:
raise DataErr('nocoord', self.name, 'world')
def _logical_to_physical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_physical_transform()
# logical -> physical
x0, x1 = self.sky.apply(x0, x1)
return (x0, x1)
def _logical_to_world(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# logical -> physical
if self.sky is not None:
x0, x1 = self.sky.apply(x0, x1)
# physical -> world
x0, x1 = self.eqpos.apply(x0, x1)
return (x0, x1)
def _physical_to_logical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_physical_transform()
# physical -> logical
x0, x1 = self.sky.invert(x0, x1)
return (x0, x1)
def _physical_to_world(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# physical -> world
x0, x1 = self.eqpos.apply(x0, x1)
return (x0, x1)
def _world_to_logical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# world -> physical
x0, x1 = self.eqpos.invert(x0, x1)
# physical -> logical
if self.sky is not None:
x0, x1 = self.sky.invert(x0, x1)
return (x0, x1)
def _world_to_physical(self, x0=None, x1=None):
if x0 is None or x1 is None:
x0, x1 = self.get_indep()
self._check_shape()
self._check_world_transform()
# world -> physical
x0, x1 = self.eqpos.invert(x0, x1)
return (x0, x1)
def get_logical(self):
coord = self.coord
x0, x1 = self.get_indep()
if coord != 'logical':
x0 = x0.copy()
x1 = x1.copy()
x0, x1 = getattr(self, '_' + coord + '_to_logical')(x0, x1)
return (x0, x1)
def get_physical(self):
coord = self.coord
x0, x1 = self.get_indep()
if coord != 'physical':
x0 = x0.copy()
x1 = x1.copy()
x0, x1 = getattr(self, '_' + coord + '_to_physical')(x0, x1)
return (x0, x1)
def get_world(self):
coord = self.coord
x0, x1 = self.get_indep()
if coord != 'world':
x0 = x0.copy()
x1 = x1.copy()
x0, x1 = getattr(self, '_' + coord + '_to_world')(x0, x1)
return (x0, x1)
# For compatibility with old Sherpa keywords
get_image = get_logical
get_wcs = get_world
def set_coord(self, coord):
coord = str(coord).strip().lower()
# Destroys original data to conserve memory for big imgs
good = ('logical', 'image', 'physical', 'world', 'wcs')
if coord not in good:
raise DataErr('badchoices', 'coordinates', coord, ", ".join(good))
if coord.startswith('wcs'):
coord = 'world'
elif coord.startswith('image'):
coord = 'logical'
func = getattr(self, 'get_' + coord)
self.indep = func()
self._set_coord(coord)
def get_filter_expr(self):
if self._region is not None:
return str(self._region)
return ''
get_filter = get_filter_expr
def notice2d(self, val=None, ignore=False):
"""Apply a 2D filter.
Parameters
----------
val : str or None, optional
The filter to apply. It can be a region string or a
filename.
ignore : bool, optional
If set then the filter should be ignored, not noticed.
"""
ignore = bool_cast(ignore)
        # This was originally a bit more complex, but it has been
# simplified.
#
if val is None:
self.mask = not ignore
self._region = None
return
if not regstatus:
raise ImportErr('importfailed', 'region', 'notice2d')
        # Create the new region
#
val = str(val).strip()
isfile = os.path.isfile(val)
reg = Region(val, isfile)
# Calculate the mask for this region as an "included"
# region.
#
mask = reg.mask(self.get_x0(), self.get_x1())
mask = mask.astype(numpy.bool)
# Apply the new mask to the existing mask.
#
if not ignore:
if self.mask is True:
self.mask = mask
else:
self.mask |= mask
else:
# Invert the response from region_mask
mask = ~mask
if self.mask is False:
self.mask = mask
else:
self.mask &= mask
# Create the new region shape.
#
if self._region is None:
if ignore:
reg.invert()
self._region = reg
else:
self._region = self._region.combine(reg, ignore)
def get_bounding_mask(self):
mask = self.mask
shape = None
if numpy.iterable(self.mask):
# create bounding box around noticed image regions
mask = numpy.array(self.mask).reshape(*self.shape)
# TODO: should replace 'mask == True' with mask but
# not sure we have a good set of tests
x0_i, x1_i = numpy.where(mask == True)
x0_lo = x0_i.min()
x0_hi = x0_i.max()
x1_lo = x1_i.min()
x1_hi = x1_i.max()
# TODO: subset mask and then ask its shape
shape = mask[x0_lo:x0_hi + 1, x1_lo:x1_hi + 1].shape
mask = mask[x0_lo:x0_hi + 1, x1_lo:x1_hi + 1]
mask = mask.ravel()
return mask, shape
def get_img(self, yfunc=None):
# FIXME add support for coords to image class -> DS9
self._check_shape()
y_img = self.filter_region(self.get_dep(False))
if yfunc is not None:
m = self.eval_model_to_fit(yfunc)
if numpy.iterable(self.mask):
# if filtered, the calculated model must be padded up
# to the data size to preserve img shape and WCS coord
m = pad_bounding_box(m, self.mask)
y_img = (y_img, self.filter_region(m))
if yfunc is not None:
y_img = (y_img[0].reshape(*self.shape),
y_img[1].reshape(*self.shape))
else:
y_img = y_img.reshape(*self.shape)
return y_img
def get_axes(self):
# FIXME: how to filter an axis when self.mask is size of self.y?
self._check_shape()
# dummy placeholders needed b/c img shape may not be square!
axis0 = numpy.arange(self.shape[1], dtype=float) + 1.
axis1 = numpy.arange(self.shape[0], dtype=float) + 1.
dummy0 = numpy.ones(axis0.size, dtype=float)
dummy1 = numpy.ones(axis1.size, dtype=float)
if self.coord == 'physical':
axis0, dummy = self._logical_to_physical(axis0, dummy0)
dummy, axis1 = self._logical_to_physical(dummy1, axis1)
elif self.coord == 'world':
axis0, dummy = self._logical_to_world(axis0, dummy0)
dummy, axis1 = self._logical_to_world(dummy1, axis1)
return (axis0, axis1)
def get_x0label(self):
"Return label for first dimension in 2-D view of independent axis/axes"
if self.coord in ('logical', 'image'):
return 'x0'
elif self.coord in ('physical',):
return 'x0 (pixels)'
elif self.coord in ('world', 'wcs'):
return 'RA (deg)'
else:
return 'x0'
def get_x1label(self):
"""
Return label for second dimension in 2-D view of independent axis/axes
"""
if self.coord in ('logical', 'image'):
return 'x1'
elif self.coord in ('physical',):
return 'x1 (pixels)'
elif self.coord in ('world', 'wcs'):
return 'DEC (deg)'
else:
return 'x1'
def to_contour(self, yfunc=None):
y = self.filter_region(self.get_dep(False))
if yfunc is not None:
m = self.eval_model_to_fit(yfunc)
if numpy.iterable(self.mask):
# if filtered, the calculated model must be padded up
# to the data size to preserve img shape and WCS coord
m = self.filter_region(pad_bounding_box(m, self.mask))
y = (y, m)
return (self.get_x0(),
self.get_x1(),
y,
self.get_x0label(),
self.get_x1label())
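    # filter_region multiplies masked-out elements by NaN so that excluded
    # pixels are blanked, rather than dropped, when the data is imaged.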
def filter_region(self, data):
if data is not None and numpy.iterable(self.mask):
filter = numpy.ones(len(self.mask), dtype=SherpaFloat)
filter[~self.mask] = numpy.nan
return data * filter
return data
class DataIMGInt(DataIMG):
_fields = Data2DInt._fields + ("sky", "eqpos", "coord")
def __init__(self, name, x0lo, x1lo, x0hi, x1hi, y, shape=None,
staterror=None, syserror=None, sky=None, eqpos=None,
coord='logical', header=None):
self._region = None
self.sky = sky
self.eqpos = eqpos
self.coord = coord
self.header = header
self.shape = shape
Data.__init__(self, name, (x0lo, x1lo, x0hi, x1hi), y, staterror, syserror)
def _init_data_space(self, filter, *data):
return IntegratedDataSpace2D(filter, *data)
def get_logical(self):
coord = self.coord
x0lo, x1lo, x0hi, x1hi = self.get_indep()
if coord != 'logical':
x0lo = x0lo.copy()
x1lo = x1lo.copy()
convert = getattr(self, '_' + coord + '_to_logical')
x0lo, x1lo = convert(x0lo, x1lo)
x0hi = x0hi.copy()
x1hi = x1hi.copy()
x0hi, x1hi = convert(x0hi, x1hi)
return (x0lo, x1lo, x0hi, x1hi)
def get_physical(self):
coord = self.coord
x0lo, x1lo, x0hi, x1hi = self.get_indep()
if coord != 'physical':
x0lo = x0lo.copy()
x1lo = x1lo.copy()
convert = getattr(self, '_' + coord + '_to_physical')
x0lo, x1lo = convert(x0lo, x1lo)
x0hi = x0hi.copy()
x1hi = x1hi.copy()
x0hi, x1hi = convert(x0hi, x1hi)
return (x0lo, x1lo, x0hi, x1hi)
def get_world(self):
coord = self.coord
x0lo, x1lo, x0hi, x1hi = self.get_indep()
if coord != 'world':
x0lo = x0lo.copy()
x1lo = x1lo.copy()
convert = getattr(self, '_' + coord + '_to_world')
x0lo, x1lo = convert(x0lo, x1lo)
x0hi = x0hi.copy()
x1hi = x1hi.copy()
x0hi, x1hi = convert(x0hi, x1hi)
return (x0lo, x1lo, x0hi, x1hi)
def get_axes(self):
# FIXME: how to filter an axis when self.mask is size of self.y?
self._check_shape()
# dummy placeholders needed b/c img shape may not be square!
axis0lo = numpy.arange(self.shape[1], dtype=float) - 0.5
axis1lo = numpy.arange(self.shape[0], dtype=float) - 0.5
axis0hi = numpy.arange(self.shape[1], dtype=float) + 0.5
axis1hi = numpy.arange(self.shape[0], dtype=float) + 0.5
dummy0 = numpy.ones(axis0lo.size, dtype=float)
dummy1 = numpy.ones(axis1lo.size, dtype=float)
if self.coord == 'physical':
axis0lo, dummy = self._logical_to_physical(axis0lo, dummy0)
axis0hi, dummy = self._logical_to_physical(axis0hi, dummy0)
dummy, axis1lo = self._logical_to_physical(dummy1, axis1lo)
dummy, axis1hi = self._logical_to_physical(dummy1, axis1hi)
elif self.coord == 'world':
axis0lo, dummy = self._logical_to_world(axis0lo, dummy0)
axis0hi, dummy = self._logical_to_world(axis0hi, dummy0)
dummy, axis1lo = self._logical_to_world(dummy1, axis1lo)
dummy, axis1hi = self._logical_to_world(dummy1, axis1hi)
return (axis0lo, axis1lo, axis0hi, axis1hi)
| gpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/testing/jpl_units/StrConverter.py | 8 | 5340 | #===========================================================================
#
# StrConverter
#
#===========================================================================
"""StrConverter module containing class StrConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import matplotlib.units as units
from matplotlib.cbook import iterable
# Place all imports before here.
#===========================================================================
__all__ = [ 'StrConverter' ]
#===========================================================================
class StrConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for string data values.
Valid units for string are:
- 'indexed' : Values are indexed as they are specified for plotting.
- 'sorted' : Values are sorted alphanumerically.
- 'inverted' : Values are inverted so that the first value is on top.
- 'sorted-inverted' : A combination of 'sorted' and 'inverted'
"""
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has string data.
= INPUT VARIABLES
- axis The axis using this converter.
      - unit    The units to use for an axis with string data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
return None
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- axis The axis using this converter.
- value The value or list of values that need to be converted.
      - unit    The units to use for an axis with string data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
if ( units.ConversionInterface.is_numlike( value ) ):
return value
if ( value == [] ):
return []
# we delay loading to make matplotlib happy
ax = axis.axes
if axis is ax.get_xaxis():
isXAxis = True
else:
isXAxis = False
axis.get_major_ticks()
ticks = axis.get_ticklocs()
labels = axis.get_ticklabels()
labels = [ l.get_text() for l in labels if l.get_text() ]
if ( not labels ):
ticks = []
labels = []
if ( not iterable( value ) ):
value = [ value ]
newValues = []
for v in value:
if ( (v not in labels) and (v not in newValues) ):
newValues.append( v )
for v in newValues:
if ( labels ):
labels.append( v )
else:
labels = [ v ]
#DISABLED: This is disabled because matplotlib bar plots do not
#DISABLED: recalculate the unit conversion of the data values
#DISABLED: this is due to design and is not really a bug.
#DISABLED: If this gets changed, then we can activate the following
#DISABLED: block of code. Note that this works for line plots.
#DISABLED if ( unit ):
#DISABLED if ( unit.find( "sorted" ) > -1 ):
#DISABLED labels.sort()
#DISABLED if ( unit.find( "inverted" ) > -1 ):
#DISABLED labels = labels[ ::-1 ]
# add padding (so they do not appear on the axes themselves)
labels = [ '' ] + labels + [ '' ]
ticks = list(xrange( len(labels) ))
ticks[0] = 0.5
ticks[-1] = ticks[-1] - 0.5
axis.set_ticks( ticks )
axis.set_ticklabels( labels )
# we have to do the following lines to make ax.autoscale_view work
loc = axis.get_major_locator()
loc.set_bounds( ticks[0], ticks[-1] )
if ( isXAxis ):
ax.set_xlim( ticks[0], ticks[-1] )
else:
ax.set_ylim( ticks[0], ticks[-1] )
result = []
for v in value:
# If v is not in labels then something went wrong with adding new
# labels to the list of old labels.
errmsg = "This is due to a logic error in the StrConverter class. "
errmsg += "Please report this error and its message in bugzilla."
assert ( v in labels ), errmsg
result.append( ticks[ labels.index(v) ] )
ax.viewLim.ignore(-1)
return result
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- axis The axis using this converter.
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# The default behavior for string indexing.
return "indexed"
| mit |
mcanthony/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt.py | 69 | 16846 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
import qt
except ImportError:
raise ImportError("Qt backend requires pyqt to be installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : qt.Qt.PointingHandCursor,
cursors.HAND : qt.Qt.WaitCursor,
cursors.POINTER : qt.Qt.ArrowCursor,
cursors.SELECT_REGION : qt.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one
"""
if qt.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = qt.QApplication( [" "] )
qt.QObject.connect( qApp, qt.SIGNAL( "lastWindowClosed()" ),
qApp, qt.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
qt.qApp.exec_loop()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( qt.QWidget, FigureCanvasBase ):
keyvald = { qt.Qt.Key_Control : 'control',
qt.Qt.Key_Shift : 'shift',
qt.Qt.Key_Alt : 'alt',
}
# left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
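    # Keys are Qt button codes (LeftButton=1, RightButton=2, MidButton=4),
    # values are matplotlib button numbers (1=left, 2=middle, 3=right).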
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
qt.QWidget.__init__( self, None, "QWidget figure" )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
w,h = self.get_width_height()
self.resize( w, h )
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
qt.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQt.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
qt.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return qt.QSize( w, h )
    def minimumSizeHint( self ):
return qt.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = event.text().latin1()
        elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = qt.QMainWindow( None, None, qt.Qt.WDestructiveClose )
self.window.closeEvent = self._widgetCloseEvent
centralWidget = qt.QWidget( self.window )
self.canvas.reparent( centralWidget, qt.QPoint( 0, 0 ) )
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( qt.QWidget.ClickFocus )
self.canvas.setFocus()
self.window.setCaption( "Figure %d" % num )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, centralWidget)
# Use a vertical layout for the plot and the toolbar. Set the
# stretch to all be in the plot so the toolbar doesn't resize.
self.layout = qt.QVBoxLayout( centralWidget )
self.layout.addWidget( self.canvas, 1 )
if self.toolbar:
self.layout.addWidget( self.toolbar, 0 )
self.window.setCentralWidget( centralWidget )
# Reset the window height so the canvas will be the right
# size. This ALMOST works right. The first issue is that the
# height w/ a toolbar seems to be off by just a little bit (so
# we add 4 pixels). The second is that the total width/height
# is slightly smaller that we actually want. It seems like
# the border of the window is being included in the size but
# AFAIK there is no way to get that size.
w = self.canvas.width()
h = self.canvas.height()
if self.toolbar:
h += self.toolbar.height() + 4
self.window.resize( w, h )
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _widgetCloseEvent( self, event ):
self._widgetclosed()
qt.QWidget.closeEvent( self.window, event )
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not yet supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close(True)
def set_window_title(self, title):
self.window.setCaption(title)
class NavigationToolbar2QT( NavigationToolbar2, qt.QWidget ):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.ppm', 'home'),
('Back', 'Back to previous view','back.ppm', 'back'),
('Forward', 'Forward to next view','forward.ppm', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.ppm', 'pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.ppm', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.ppm', 'save_figure'),
)
def __init__( self, canvas, parent ):
self.canvas = canvas
self.buttons = {}
qt.QWidget.__init__( self, parent )
# Layout toolbar buttons horizontally.
self.layout = qt.QHBoxLayout( self )
self.layout.setMargin( 2 )
NavigationToolbar2.__init__( self, canvas )
def _init_toolbar( self ):
basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text == None:
self.layout.addSpacing( 8 )
continue
fname = os.path.join( basedir, image_file )
image = qt.QPixmap()
image.load( fname )
button = qt.QPushButton( qt.QIconSet( image ), "", self )
qt.QToolTip.add( button, tooltip_text )
self.buttons[ text ] = button
# The automatic layout doesn't look that good - it's too close
# to the images so add a margin around it.
margin = 4
button.setFixedSize( image.width()+margin, image.height()+margin )
qt.QObject.connect( button, qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
self.layout.addWidget( button )
self.buttons[ 'Pan' ].setToggleButton( True )
self.buttons[ 'Zoom' ].setToggleButton( True )
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
self.locLabel = qt.QLabel( "", self )
self.locLabel.setAlignment( qt.Qt.AlignRight | qt.Qt.AlignVCenter )
self.locLabel.setSizePolicy(qt.QSizePolicy(qt.QSizePolicy.Ignored,
qt.QSizePolicy.Ignored))
self.layout.addWidget( self.locLabel, 1 )
# reference holder for subplots_adjust window
self.adj_window = None
def destroy( self ):
for text, tooltip_text, image_file, callback in self.toolitems:
if text is not None:
qt.QObject.disconnect( self.buttons[ text ],
qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
def pan( self, *args ):
self.buttons[ 'Zoom' ].setOn( False )
NavigationToolbar2.pan( self, *args )
def zoom( self, *args ):
self.buttons[ 'Pan' ].setOn( False )
NavigationToolbar2.zoom( self, *args )
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.locLabel.setText( s )
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
qt.QApplication.restoreOverrideCursor()
qt.QApplication.setOverrideCursor( qt.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [ int(val) for val in (min(x0, x1), min(y0, y1), w, h) ]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = qt.QMainWindow(None, None, qt.Qt.WDestructiveClose)
win = self.adj_window
win.setCaption("Subplot Configuration Tool")
toolfig = Figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
canvas = self._get_canvas(toolfig)
tool = SubplotTool(self.canvas.figure, toolfig)
centralWidget = qt.QWidget(win)
canvas.reparent(centralWidget, qt.QPoint(0, 0))
win.setCentralWidget(centralWidget)
layout = qt.QVBoxLayout(centralWidget)
layout.addWidget(canvas, 1)
win.resize(w, h)
canvas.setFocus()
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = qt.QFileDialog.getSaveFileName(
start, filters, self, "Save image", "Choose a filename to save to",
selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
qt.QMessageBox.critical(
self, "Error saving file", str(e),
qt.QMessageBox.Ok, qt.QMessageBox.NoButton)
def set_history_buttons( self ):
canBackward = ( self._views._pos > 0 )
canForward = ( self._views._pos < len( self._views._elements ) - 1 )
self.buttons[ 'Back' ].setEnabled( canBackward )
self.buttons[ 'Forward' ].setEnabled( canForward )
# set icon used when windows are minimized
try:
# TODO: This is badly broken
qt.window_set_default_icon_from_file (
os.path.join( matplotlib.rcParams['datapath'], 'images', 'matplotlib.svg' ) )
except:
verbose.report( 'Could not load matplotlib icon: %s' % sys.exc_info()[1] )
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
qt.QMessageBox.warning( None, "Matplotlib", msg, qt.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
FlorianGraef/adv-lane-lines-vehicle-detection | vehicle_inference.py | 1 | 1024 | from keras.models import load_model
import numpy as np
import matplotlib as plt
import cv2
from keras import backend as K
smooth = 1
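# Note: despite its name, IOU_calc computes a smoothed Dice coefficient,
# 2 * intersection / (sum(y_true) + sum(y_pred)), over the flattened masks;
# the smooth term keeps the ratio defined when both masks are empty.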
def IOU_calc(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return 2*(intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def IOU_calc_loss(y_true, y_pred):
return -IOU_calc(y_true, y_pred)
class VehicleInference():
def __init__(self, model_path, nn_input_dim):
self.model = load_model(model_path, custom_objects={'IOU_calc_loss': IOU_calc_loss, 'IOU_calc':IOU_calc})
#self.model.load_weights('sem_seg_unet30e_w.h5')
self.nn_input_dims = nn_input_dim
def get_mask(self, img):
batch_wrapped = np.empty([1, self.nn_input_dims[1], self.nn_input_dims[0], 3])
batch_wrapped[0] = cv2.resize(img, self.nn_input_dims)
pred_mask = (self.model.predict(batch_wrapped))[0]
return pred_mask
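# Example usage (illustrative sketch; the model path and network input size
# below are assumptions, not values shipped with this file):
#   vi = VehicleInference('sem_seg_unet30e.h5', (640, 360))
#   mask = vi.get_mask(frame)   # frame: image array as read by cv2.imread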
| mit |
meduz/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
jiangxb1987/spark | python/pyspark/sql/context.py | 5 | 18889 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since, _NoValue
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.udf import UDFRegistration
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return self.sparkSession.udf
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@since(1.2)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self.sparkSession.udf.register(name, f, returnType)
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
DeprecationWarning)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temporary table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
hamid-omid/search_relevance | data_selection.py | 1 | 2491 | '''
Detecting the best features among ~600 new features.
INPUT FILES:
Train(i).csv (The new features of train set; made by Arman)
Test(j).csv (the new features of test set; made by Arman)
OUTPUTS:
newFeatTrain.csv (a file that only has the most relevant features)
newFeatTest.csv (a file that only has the most relevant features)
__Authors__:
Ali Narimani, Hamid Omid
__Version__:
1.0
'''
import numpy as np
import pandas as pd
### Reading input data:
df_ts1 = pd.read_csv('../../homedepotdata/Test1.csv')
df_ts2 = pd.read_csv('../../homedepotdata/Test2.csv')
df_ts3 = pd.read_csv('../../homedepotdata/Test3.csv')
df_ts4 = pd.read_csv('../../homedepotdata/Test4.csv')
df_tr1 = pd.read_csv('../../homedepotdata/Train1.csv')
df_tr2 = pd.read_csv('../../homedepotdata/Train2.csv')
### concat train and test:
frames = [df_ts1,df_ts2,df_ts3,df_ts4]
TEST = pd.concat(frames, axis=0, ignore_index=True)
frames = [df_tr1,df_tr2]
TRAIN = pd.concat(frames, axis=0, ignore_index=True)
### Drop columns with less than `epsilon` variation:
names = list(TEST.columns)
columns2drop = []
stdTr = {}
stdTs = {}
epsilon = 10**(-3) # this is a subjective choice, but we have no time
for column in names:
sd = np.std(TEST[column])
stdTs[column] = sd
sd = np.std(TRAIN[column])
stdTr[column] = sd
if sd < epsilon:
columns2drop.append(column)
TRAIN.drop(columns2drop,axis=1,inplace=True)
TEST.drop(columns2drop,axis=1,inplace=True)
### Drop columns that are correlated more than (1-eta):
names = TEST.columns
corrDrop = []
eta = 0.2 # this is a subjective choice, but we have no time
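# Pairwise scan (O(n^2) in the number of columns): whenever two columns are
# correlated above 1 - eta, the later column of the pair is marked for removal
# and the earlier one is kept.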
for c1 in range(len(names)):
col1 = names[c1]
for c2 in range(c1+1,len(names)):
col2 = names[c2]
buff = abs(np.corrcoef(TRAIN[col1],TRAIN[col2])[0,1])
if buff > (1-eta) :
corrDrop.append(col2)
TRAIN.drop(corrDrop,axis=1,inplace=True)
TEST.drop(corrDrop,axis=1,inplace=True)
### Detect columns with a higher than `delta` correlation with relevance:
names = TEST.columns
corrRelev = {}
goodCol = []
delta = 0.05 # this is a subjective choice, but we have no time
for c1 in range(len(names)):
col = names[c1]
buff = abs(np.corrcoef(TRAIN[col],TRAIN.relevance)[0,1])
corrRelev[col] = buff
if buff > delta:
goodCol.append(col)
### writing data files
trainNew = TRAIN[goodCol]
testNew = TEST[goodCol]
trainNew.to_csv('newFeatTrain.csv',index=False)
testNew.to_csv('newFeatTest.csv',index=False)
# End of Code
| mit |
Jimmy-Morzaria/scikit-learn | sklearn/neighbors/unsupervised.py | 22 | 4396 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
public-ink/public-ink | server/appengine/lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 18 | 26105 | """
An experimental support for curvilinear grid.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
objects which defines the transform and its inverse. The callables
need take two arguments of array of source coordinates and
should return two target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
            Create a new shear-like transform that maps (x, y) -> (x, y - x).
            Resolution is the number of steps to interpolate between each input
            line segment to approximate its path in the transformed space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import (
        host_subplot_class_factory, ParasiteAxesAuxTrans)
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree, minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the acceptable Locator
    # and Formatter classes are a bit different from mpl's, and you cannot
    # directly use mpl's Locator and Formatter here (though that may become
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree, minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the acceptable Locator
    # and Formatter classes are a bit different from mpl's, and you cannot
    # directly use mpl's Locator and Formatter here (though that may become
    # possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| gpl-3.0 |
flaviovdf/pyksc | src/scripts/tags_io.py | 1 | 2958 | # -*- coding: utf8
'''This module contains the code used for data conversion'''
from __future__ import division, print_function
from collections import defaultdict
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import Vectorizer
import nltk
class NoopAnalyzer(BaseEstimator):
'''
Since we use NLTK to preprocess (more control) this
class is used to bypass sklearns preprocessing
'''
def analyze(self, text_document):
'''Does nothing'''
return text_document
def __tokenize_and_stem(fpath):
'''
Tokenizes and stems the file, converting each line to
an array of words.
Arguments
---------
fpath: a path to a file
Each line is a song, tags are separated by space
'''
tokenizer = nltk.RegexpTokenizer(r'\w+')
stopwords = set(nltk.corpus.stopwords.words('english'))
stemmer = nltk.stem.PorterStemmer()
docs = []
term_pops = defaultdict(int)
with open(fpath) as tags_file:
for line in tags_file:
as_doc = []
for term in tokenizer.tokenize(line)[1:]:
term = term.lower().strip()
if term not in stopwords and term != '':
stemmed = stemmer.stem(term)
as_doc.append(stemmed)
term_pops[stemmed] += 1
docs.append(as_doc)
return docs, term_pops
def clean_up(fpath, bottom_filter=0.01):
'''
Converts a YouTube tag file to a series of tokens. This code
stems the tags, removes stopwords and filters infrequent
    tags (whose probability is below `bottom_filter`).
Arguments
---------
fpath: a path to a file
Each line is a song, tags are separated by space
bottom_filter: float (defaults to 0.01, one percent)
Minimum probability for tags to be considered useful
'''
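    # Usage sketch (the file name is hypothetical):
    #     for tokens in clean_up('tags.txt', bottom_filter=0.01):
    #         print(tokens)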
docs, term_pops = __tokenize_and_stem(fpath)
for doc in docs:
to_yield = []
for term in doc:
prob_term = term_pops[term] / len(term_pops)
if prob_term > bottom_filter:
to_yield.append(term)
yield to_yield
def vectorize_videos(fpath, use_idf=False):
'''
Converts a YouTube tag file to a sparse matrix pondered. We can assign
weights based on IDF if specified.
Arguments
---------
fpath: a path to a file
Each line is a song, tags are separated by space
    use_idf: bool (optional, defaults to False)
        Indicates whether to use IDF weighting.
    Note: tags are pre-processed with ``clean_up(fpath, bottom_filter=0)``,
        so no frequency filtering is applied at this stage.
'''
#Vectorizes to TF-IDF
vectorizer = Vectorizer(analyzer=NoopAnalyzer(), use_idf = use_idf)
sparse_matrix = vectorizer.fit_transform(clean_up(fpath, bottom_filter=0))
vocabulary = vectorizer.vocabulary
return sparse_matrix, vocabulary | bsd-3-clause |
etkirsch/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
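    # One possible way to fill in the TASKs above, kept as comments so the
    # exercise skeleton stays unsolved (parameter values are illustrative):
    #     pipeline = Pipeline([
    #         ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    #         ('clf', LinearSVC(C=1000)),
    #     ])
    #     parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    #     grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    #     grid_search.fit(docs_train, y_train)
    #     y_predicted = grid_search.predict(docs_test)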
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
jpmpentwater/traversing_knowledge_graphs | code/accuracy_evaluate.py | 1 | 1366 |
# coding: utf-8
# In[ ]:
import scriptinit
import util
from data import *
from optimize import *
import diagnostics as dns
from os.path import join
from composition import *
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import argparse
from os.path import join
import cPickle
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('dataset')
parser.add_argument('model')
parser.add_argument('params')
if util.in_ipython():
args = parser.parse_args(['freebase_socher', 'bilinear', 'compositional_wvec_bilinear_freebase_socher_0xe7d4cf_params.cpkl'])
else:
args = parser.parse_args()
util.metadata('dataset', args.dataset)
util.metadata('model', args.model)
util.metadata('params', args.params)
util.metadata('split', 'test')
model = CompositionalModel(None, path_model=args.model, objective='margin')
params = load_params(args.params, args.model)
dev = dns.load_socher_test(join(args.dataset, 'dev'))
test = dns.load_socher_test(join(args.dataset, 'test'))
def score(samples):
for ex in samples:
try:
ex.score = model.predict(params, ex).ravel()[0]
except KeyError:
print 'out of vocab'
ex.score = float('inf')
score(dev)
score(test)
thresholds = dns.compute_best_thresholds(dev)
util.metadata('accuracy', dns.accuracy(thresholds, test))
| mit |
UltronAI/Deep-Learning | CS231n/assignment1/cs231n/classifiers/neural_net.py | 1 | 12775 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from past.builtins import xrange
class TwoLayerNet(object):
"""
    A two-layer fully-connected neural network. The net has an input dimension of
    D, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0, dropout=False):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
p = 0.2
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
h = X.dot(W1)+b1
if dropout == True:
drop_p = np.random.uniform(0,1,(W1.shape[1],))
drop_id = (drop_p>p).reshape(W1.shape[1])
# print(h[:, drop_id].shape)
h[:, drop_id] = 0
z = np.maximum(0, h)
scores = z.dot(W2)+b2
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. #
#############################################################################
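    # Softmax cross-entropy loss computed below:
    #     L = (1/N) * sum_i -log( exp(s_{i, y_i}) / sum_j exp(s_{i, j}) )
    #         + reg * (sum(W1**2) + sum(W2**2))
    # Subtracting the row-wise max from the scores first keeps exp() numerically
    # stable without changing the resulting probabilities.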
scores -= np.max(scores, axis=1, keepdims=True)
s = np.sum(np.exp(scores), axis=1, keepdims=True)
p = np.exp(scores)/s
loss = np.sum(-np.log(p[np.arange(N), y]))
loss /= N
loss += reg*np.sum(W1*W1) + reg*np.sum(W2*W2)
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
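    # Gradient of the softmax loss w.r.t. the scores, averaged over the batch:
    #     dL/ds_{i, j} = (p_{i, j} - 1{j == y_i}) / N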
dscores = p
dscores[np.arange(N), y] -= 1
dscores /= N
# W2 and b2
grads['W2'] = z.T.dot(dscores)
grads['b2'] = np.sum(dscores, axis=0)
# next backprop into hidden layer
dhidden = dscores.dot(W2.T)
# backprop the ReLU non-linearity
dhidden[h<0] = 0
if dropout == True:
dhidden[:, drop_id] = 0
# finally into W,b
grads['W1'] = X.T.dot(dhidden)
grads['b1'] = np.sum(dhidden, axis=0)
# add regularization gradient contribution
grads['W2'] += 2*reg*W2
grads['W1'] += 2*reg*W1
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False, mu=0, dropout=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
    - y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train // batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
if mu != 0:
v = {'W1':np.zeros_like(self.params['W1']),
'b1':np.zeros_like(self.params['b1']),
'W2':np.zeros_like(self.params['W2']),
'b2':np.zeros_like(self.params['b2'])}
for it in xrange(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
idx = np.random.choice(np.arange(num_train), batch_size)
X_batch = X[idx, :]
y_batch = y[idx]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg, dropout=dropout)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
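      # Parameter update: plain SGD (w -= learning_rate * grad) when mu == 0,
      # otherwise the classical momentum update v = mu * v - learning_rate * grad,
      # followed by w += v.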
for str in ['W1','b1','W2','b2']:
if mu == 0:
self.params[str] -= learning_rate*grads[str]
else:
v[str] = mu*v[str] - learning_rate*grads[str]
self.params[str] += v[str]
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
# learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
h = X.dot(W1)+b1
z = np.maximum(0, h)
scores = z.dot(W2)+b2
y_pred = scores.argmax(axis=1)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
| mit |
evanbiederstedt/RRBSfun | scripts/PDR_methyl_CLL_RRBS_cw154_StackA1.py | 1 | 2026 |
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')
# set glob subdirectory via cell batch
cll_cells1 = glob.glob("RRBS_cw154*")
for filename in cll_cells1:
df = pd.read_table(filename)
df = df.drop(['chr', 'start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'tss', 'tssDistance', 'genes', 'exons',
'introns', 'promoter', 'cgi',
'geneDensity', 'ctcfUpstream', 'ctcfDownstream','ctcfDensity', 'geneDistalRegulatoryModules',
'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance','3PrimeUTRDistance',
'5PrimeUTR', '5PrimeUTRDistance', 'firstExon','geneDistalRegulatoryModulesK562',
'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance',
'genesDistance', 'exonsDistance', 'intronsDistance', 'promoterDistance', 'cgiDistance',
'ctcf', 'ctcfDistance', 'geneDistalRegulatoryModulesDistance', 'vistaEnhancersDistance', 'firstExonDistance'], axis=1)
num_bins2 = np.ceil(df['avgReadCpGs'].max()/1.25)
df['avgReadCpGs_binned'] = pd.cut(df['avgReadCpGs'], num_bins2, labels=False)
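    # Consecutive rows that fall in the same avgReadCpGs bin share one stack ID:
    # the shift/compare/cumsum idiom below increments the ID whenever the bin changes.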
df['read_stack_ID'] = (df.avgReadCpGs_binned.shift(1) != df.avgReadCpGs_binned).astype(int).cumsum()
df['total_reads'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df['avgReadCpGs'] = df['avgReadCpGs'].values.round(decimals=0)
df1 = df.groupby(['read_stack_ID', 'avgReadCpGs'])[['thisMeth', 'thisUnmeth', 'methReadCount', 'unmethReadCount', 'mixedReadCount', 'total_reads']].sum()
df1.reset_index(inplace=True)
df1["methylation"] = df1["thisMeth"]/(df1["thisMeth"]+df1["thisUnmeth"]) # corrected
df1["PDR_per_stack"] = df1["mixedReadCount"]/df1["total_reads"]
df1.to_csv(str("stacked_") + str(filename) +str(".csv"))
| mit |
dansbecker/skflow | examples/iris_custom_decay_dnn.py | 6 | 1600 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import skflow
import tensorflow as tf
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
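# tf.train.exponential_decay computes
#     decayed_lr = learning_rate * decay_rate ** (global_step / decay_steps)
# so with these settings the rate shrinks from 0.1 by a factor of 1000 every 100 steps.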
# use customized decay function in learning_rate
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=800,
learning_rate=exp_decay)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probalities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
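            # `predictions` has shape [n_samples, n_classifiers]; for each row,
            # np.bincount tallies the (optionally weighted) votes per encoded
            # class label and argmax picks the winning class.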
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
Wonjuseo/Project101 | 0/0-6. pythonbasic-3.py | 1 | 2191 | # List Comprehensions
list1 = [i for i in range(10)]
print(list1)
# print: 0,1,2,3,4,5,6,7,8,9
list1 = [i for i in range(10) if i % 2 == 0]
print(list1)
# print: 0,2,4,6,8
print(abs(-3),complex(1,2),abs(complex(1,2)))
# print: 3, 1+2j, 2.2360(values)
print(all([True,True,True]),all([True,False,True]))
# print: True, False
print(divmod(7,3),divmod(8,3))
# print: (2,1) (2,2)
print(max(1,2,3,4,5),min(1,2,3,4,5))
# print: 5 1
list1= [1,2,3,4,5]
iter1 = iter(list1)
print(next(iter1),next(iter1),next(iter1))
# print: 1 2 3
print(pow(2,6),round(3.1415),round(3.1415,2),sum([1,2,3,4]))
# print: 64 3 3.14 10
for i in range(0,10,3):
print(i)
# print: 0 3 6 9
a = [1,2,3]
b = [4,5,6]
zip1 = zip(a,b)
print(list(zip1))
# print: (1,4), (2,5), (3,6)
from math import pi
print(pi)
# print: 3.141592653589793
from datetime import datetime
print(datetime.now())
# print: now time
import calendar
calendar.prmonth(2017, 2)  # prmonth prints the calendar itself and returns None
# print: calendar for February 2017
# Python Image Library
from PIL import Image
from PIL import ImageFilter
#im = Image.open('./123456.jpg')
#imout = im.filter(ImageFilter.SMOOTH_MORE)
# BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE, EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN
#imout.show()
# matplotlib
from pylab import *
plot([1,2,3,4])
ylabel('some numbers')
show()
import matplotlib.pyplot as plt
plt.plot([1,2,3,4],[1,4,9,16],'ro')
plt.axis([0,6,0,20])
plt.show()
# scatter
import numpy as np
x = np.random.randn(1000)
y = np.random.randn(1000)
plt.scatter(x,y)
plt.show()
x = np.linspace(0,5,10)
y = x**2
fig = plt.figure()
axes1 = fig.add_axes([0.1,0.1,0.8,0.8])
axes2 = fig.add_axes([0.2,0.5,0.4,0.3])
axes1.plot(x,y,'r')
axes1.set_xlabel('x')
axes1.set_ylabel('y')
axes1.set_title('title')
axes2.plot(y,x,'g')
axes2.set_xlabel('y')
axes2.set_ylabel('x')
axes2.set_title('inser title')
fig.show()
# subplots, step, bar
n = np.array([0,1,2,3,4,5])
fig, axes = plt.subplots(nrows=1,ncols=2)
axes[0].step(n,n**2,lw=2)
axes[0].set_title('step')
axes[1].bar(n,n**2,align='center',alpha=0.5)
axes[1].set_title('bar')
plt.show()
| apache-2.0 |
duguyue100/telauges | scripts/convnet_test.py | 1 | 5234 | """
@author: Yuhuang Hu
@contact: [email protected]
@note: ConvNet tests
"""
import numpy as np;
import theano
import theano.tensor as T;
import matplotlib.pyplot as plt;
import telauges.utils as utils;
from telauges.conv_net_layer import ConvNetLayer;
from telauges.hidden_layer import HiddenLayer;
from telauges.hidden_layer import SoftmaxLayer;
n_epochs=50;
training_portion=1;
batch_size=200;
nkerns=[50, 20];
datasets=utils.load_mnist("../data/mnist.pkl.gz");
rng=np.random.RandomState(23455);
### Loading and preparing dataset
train_set_x, train_set_y = datasets[0];
valid_set_x, valid_set_y = datasets[1];
test_set_x, test_set_y = datasets[2];
n_train_batches=int(train_set_x.get_value(borrow=True).shape[0]*training_portion);
n_valid_batches=valid_set_x.get_value(borrow=True).shape[0];
n_test_batches=test_set_x.get_value(borrow=True).shape[0];
n_train_batches /= batch_size; # number of train data batches
n_valid_batches /= batch_size; # number of valid data batches
n_test_batches /= batch_size; # number of test data batches
print "[MESSAGE] The data is loaded"
print "[MESSAGE] Building model"
index=T.lscalar(); # batch index
X=T.matrix('X'); # input data source
y=T.ivector('y'); # input data label
images=X.reshape((batch_size, 1, 28, 28))
# layer 0: input (28, 28), filters (7, 7) -> conv output (22, 22) -> (11, 11) after pooling
layer_0=ConvNetLayer(rng=rng,
feature_maps=images,
feature_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 7, 7),
pool=True,
activate_mode="relu");
filters=layer_0.filters;
for i in xrange(nkerns[0]):
    plt.subplot(8, 7, i + 1);
plt.imshow(filters.get_value(borrow=True)[i,0,:,:], cmap = plt.get_cmap('gray'), interpolation='nearest');
plt.axis('off')
plt.show();
# layer 1: input (11, 11), filters (4, 4) -> conv output (8, 8) -> (4, 4) after pooling
layer_1=ConvNetLayer(rng=rng,
feature_maps=layer_0.out_feature_maps,
feature_shape=(batch_size, nkerns[0], 11, 11),
filter_shape=(nkerns[1], nkerns[0], 4, 4),
pool=True,
activate_mode="relu");
# layer 1 output is (4, 4) per feature map, so the flattened hidden-layer input has nkerns[1] * 16 features
layer_2=HiddenLayer(rng=rng,
data_in=layer_1.out_feature_maps.flatten(2),
n_in=nkerns[1]*16,
n_out=300);
layer_3=SoftmaxLayer(rng=rng,
data_in=layer_2.output,
n_in=300,
n_out=10);
params=layer_0.params+layer_1.params+layer_2.params+layer_3.params;
cost=layer_3.cost(y)+0.001*((layer_0.filters**2).sum()+(layer_1.filters**2).sum()+(layer_2.W**2).sum()+(layer_3.W**2).sum());
gparams=T.grad(cost, params);
updates=[(param_i, param_i-0.1*grad_i)
for param_i, grad_i in zip(params, gparams)];
test_model = theano.function(inputs=[index],
outputs=layer_3.errors(y),
givens={X: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]});
validate_model = theano.function(inputs=[index],
outputs=layer_3.errors(y),
givens={X: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]});
train_model = theano.function(inputs=[index],
outputs=cost,
updates=updates,
givens={X: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]});
print "[MESSAGE] The model is built";
print "[MESSAGE] Start training"
validation_frequency = n_train_batches;
validation_record=np.zeros((n_epochs, 1));
test_record=np.zeros((n_epochs, 1));
epoch = 0;
while (epoch < n_epochs):
epoch = epoch + 1;
for minibatch_index in xrange(n_train_batches):
mlp_minibatch_avg_cost = train_model(minibatch_index);
iter = (epoch - 1) * n_train_batches + minibatch_index;
if (iter + 1) % validation_frequency == 0:
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)];
validation_record[epoch-1] = np.mean(validation_losses);
print 'MLP MODEL';
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches, validation_record[epoch-1] * 100.));
test_losses = [test_model(i) for i
in xrange(n_test_batches)];
test_record[epoch-1] = np.mean(test_losses);
print((' epoch %i, minibatch %i/%i, test error %f %%') %
(epoch, minibatch_index + 1, n_train_batches, test_record[epoch-1] * 100.));
filters=layer_0.filters;
for i in xrange(nkerns[0]):
    plt.subplot(8, 7, i + 1);
plt.imshow(filters.get_value(borrow=True)[i,0,:,:], cmap = plt.get_cmap('gray'), interpolation='nearest');
plt.axis('off')
plt.show(); | gpl-3.0 |
billy-inn/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
freeman-lab/dask | dask/dataframe/tests/test_optimize_dataframe.py | 1 | 1939 | import pytest
from operator import getitem
from toolz import valmap
import bcolz
from dask.dataframe.optimize import rewrite_rules, dataframe_from_ctable
import dask.dataframe as dd
import pandas as pd
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
dfs = list(dsk.values())
def test_column_optimizations_with_bcolz_and_rewrite():
bc = bcolz.ctable([[1, 2, 3], [10, 20, 30]], names=['a', 'b'])
func = lambda x: x
for cols in [None, 'abc', ['abc']]:
dsk2 = dict((('x', i),
(func,
(getitem,
(dataframe_from_ctable, bc, slice(0, 2), cols, {}),
(list, ['a', 'b']))))
for i in [1, 2, 3])
expected = dict((('x', i), (func, (dataframe_from_ctable,
bc, slice(0, 2), (list, ['a', 'b']), {})))
for i in [1, 2, 3])
result = valmap(rewrite_rules.rewrite, dsk2)
assert result == expected
def test_fast_functions():
df = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None, None, None])
e = df.a + df.b
assert len(e.dask) > 6
assert len(dd.optimize(e.dask, e._keys())) == 6
def test_castra_column_store():
try:
from castra import Castra
except ImportError:
return
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
with Castra(template=df) as c:
c.extend(df)
df = c.to_dask()
df2 = df[['x']]
dsk = dd.optimize(df2.dask, df2._keys())
assert dsk == {(df2._name, 0): (Castra.load_partition, c, '0--2',
(list, ['x']))}
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_panel4d.py | 7 | 35347 | # -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import nose
import numpy as np
from pandas.types.common import is_float_dtype
from pandas import Series, Index, isnull, notnull
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.core.series import remove_na
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
tm.add_nans(panel)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel4d)
def test_iter(self):
tm.equalContents(list(self.panel4d), self.panel4d.labels)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel4d
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_panel_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
@classmethod
def assert_panel4d_equal(cls, x, y):
assert_panel4d_equal(x, y)
def test_get_axis(self):
assert(self.panel4d._get_axis(0) is self.panel4d.labels)
assert(self.panel4d._get_axis(1) is self.panel4d.items)
assert(self.panel4d._get_axis(2) is self.panel4d.major_axis)
assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)
def test_set_axis(self):
new_labels = Index(np.arange(len(self.panel4d.labels)))
# TODO: unused?
# new_items = Index(np.arange(len(self.panel4d.items)))
new_major = Index(np.arange(len(self.panel4d.major_axis)))
new_minor = Index(np.arange(len(self.panel4d.minor_axis)))
# ensure propagate to potentially prior-cached items too
# TODO: unused?
# label = self.panel4d['l1']
self.panel4d.labels = new_labels
if hasattr(self.panel4d, '_item_cache'):
self.assertNotIn('l1', self.panel4d._item_cache)
self.assertIs(self.panel4d.labels, new_labels)
self.panel4d.major_axis = new_major
self.assertIs(self.panel4d[0].major_axis, new_major)
self.assertIs(self.panel4d.major_axis, new_major)
self.panel4d.minor_axis = new_minor
self.assertIs(self.panel4d[0].minor_axis, new_minor)
self.assertIs(self.panel4d.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel4d._get_axis_number('labels'), 0)
self.assertEqual(self.panel4d._get_axis_number('items'), 1)
self.assertEqual(self.panel4d._get_axis_number('major'), 2)
self.assertEqual(self.panel4d._get_axis_number('minor'), 3)
def test_get_axis_name(self):
self.assertEqual(self.panel4d._get_axis_name(0), 'labels')
self.assertEqual(self.panel4d._get_axis_name(1), 'items')
self.assertEqual(self.panel4d._get_axis_name(2), 'major_axis')
self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')
def test_arith(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self._test_op(self.panel4d, operator.add)
self._test_op(self.panel4d, operator.sub)
self._test_op(self.panel4d, operator.mul)
self._test_op(self.panel4d, operator.truediv)
self._test_op(self.panel4d, operator.floordiv)
self._test_op(self.panel4d, operator.pow)
self._test_op(self.panel4d, lambda x, y: y + x)
self._test_op(self.panel4d, lambda x, y: y - x)
self._test_op(self.panel4d, lambda x, y: y * x)
self._test_op(self.panel4d, lambda x, y: y / x)
self._test_op(self.panel4d, lambda x, y: y ** x)
self.assertRaises(Exception, self.panel4d.__add__,
self.panel4d['l1'])
@staticmethod
def _test_op(panel4d, op):
result = op(panel4d, 1)
assert_panel_equal(result['l1'], op(panel4d['l1'], 1))
def test_keys(self):
tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)
def test_iteritems(self):
"""Test panel4d.iteritems()"""
self.assertEqual(len(list(self.panel4d.iteritems())),
len(self.panel4d.labels))
def test_combinePanel4d(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = self.panel4d.add(self.panel4d)
self.assert_panel4d_equal(result, self.panel4d * 2)
def test_neg(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)
def test_select(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
p = self.panel4d
# select labels
result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
expected = p.reindex(labels=['l1', 'l3'])
self.assert_panel4d_equal(result, expected)
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel4d_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15),
axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel4d_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=3)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel4d_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo',), axis='items')
self.assert_panel4d_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
def test_abs(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = self.panel4d.abs()
expected = np.abs(self.panel4d)
self.assert_panel4d_equal(result, expected)
p = self.panel4d['l1']
result = p.abs()
expected = np.abs(p)
assert_panel_equal(result, expected)
df = p['ItemA']
result = df.abs()
expected = np.abs(df)
assert_frame_equal(result, expected)
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel4d.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = self.panel4d['l2']
result = self.panel4d.pop('l2')
assert_panel_equal(expected, result)
self.assertNotIn('l2', self.panel4d.labels)
del self.panel4d['l3']
self.assertNotIn('l3', self.panel4d.labels)
self.assertRaises(Exception, self.panel4d.__delitem__, 'l3')
values = np.empty((4, 4, 4, 4))
values[0] = 0
values[1] = 1
values[2] = 2
values[3] = 3
panel4d = Panel4D(values, lrange(4), lrange(4),
lrange(4), lrange(4))
# did we delete the right row?
panel4dc = panel4d.copy()
del panel4dc[0]
assert_panel_equal(panel4dc[1], panel4d[1])
assert_panel_equal(panel4dc[2], panel4d[2])
assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[1]
assert_panel_equal(panel4dc[0], panel4d[0])
assert_panel_equal(panel4dc[2], panel4d[2])
assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[2]
assert_panel_equal(panel4dc[1], panel4d[1])
assert_panel_equal(panel4dc[0], panel4d[0])
assert_panel_equal(panel4dc[3], panel4d[3])
panel4dc = panel4d.copy()
del panel4dc[3]
assert_panel_equal(panel4dc[1], panel4d[1])
assert_panel_equal(panel4dc[2], panel4d[2])
assert_panel_equal(panel4dc[0], panel4d[0])
def test_setitem(self):
# LongPanel with one item
# lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
# self.assertRaises(Exception, self.panel.__setitem__,
# 'ItemE', lp)
# Panel
p = Panel(dict(
ItemA=self.panel4d['l1']['ItemA'][2:].filter(items=['A', 'B'])))
self.panel4d['l4'] = p
self.panel4d['l5'] = p
p2 = self.panel4d['l4']
assert_panel_equal(p, p2.reindex(items=p.items,
major_axis=p.major_axis,
minor_axis=p.minor_axis))
# scalar
self.panel4d['lG'] = 1
self.panel4d['lE'] = True
self.assertEqual(self.panel4d['lG'].values.dtype, np.int64)
self.assertEqual(self.panel4d['lE'].values.dtype, np.bool_)
# object dtype
self.panel4d['lQ'] = 'foo'
self.assertEqual(self.panel4d['lQ'].values.dtype, np.object_)
# boolean dtype
self.panel4d['lP'] = self.panel4d['l1'] > 0
self.assertEqual(self.panel4d['lP'].values.dtype, np.bool_)
def test_setitem_by_indexer(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Panel
panel4dc = self.panel4d.copy()
p = panel4dc.iloc[0]
def func():
self.panel4d.iloc[0] = p
self.assertRaises(NotImplementedError, func)
# DataFrame
panel4dc = self.panel4d.copy()
df = panel4dc.iloc[0, 0]
df.iloc[:] = 1
panel4dc.iloc[0, 0] = df
self.assertTrue((panel4dc.iloc[0, 0].values == 1).all())
# Series
panel4dc = self.panel4d.copy()
s = panel4dc.iloc[0, 0, :, 0]
s.iloc[:] = 1
panel4dc.iloc[0, 0, :, 0] = s
self.assertTrue((panel4dc.iloc[0, 0, :, 0].values == 1).all())
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
self.assertTrue((panel4dc.iloc[0].values == 1).all())
self.assertTrue(panel4dc.iloc[1].values.all())
self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
def test_setitem_by_indexer_mixed_type(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH 8702
self.panel4d['foo'] = 'bar'
# scalar
panel4dc = self.panel4d.copy()
panel4dc.iloc[0] = 1
panel4dc.iloc[1] = True
panel4dc.iloc[2] = 'foo'
self.assertTrue((panel4dc.iloc[0].values == 1).all())
self.assertTrue(panel4dc.iloc[1].values.all())
self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
def test_comparisons(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
p1 = tm.makePanel4D()
p2 = tm.makePanel4D()
tp = p1.reindex(labels=p1.labels.tolist() + ['foo'])
p = p1[p1.labels[0]]
def test_comp(func):
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, p)
result3 = func(self.panel4d, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel4d.values, 0))
with np.errstate(invalid='ignore'):
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_major_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.major_axis[5]
xs = self.panel4d.major_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'],
ref.xs(idx), check_names=False)
# not contained
idx = self.panel4d.major_axis[0] - BDay()
self.assertRaises(Exception, self.panel4d.major_xs, idx)
def test_major_xs_mixed(self):
self.panel4d['l4'] = 'foo'
xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
self.assertEqual(xs['l1']['A'].dtype, np.float64)
self.assertEqual(xs['l4']['A'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel4d['l1']['ItemA']
idx = self.panel4d.minor_axis[1]
xs = self.panel4d.minor_xs(idx)
assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel4d.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel4d['l4'] = 'foo'
xs = self.panel4d.minor_xs('D')
self.assertEqual(xs['l1'].T['ItemA'].dtype, np.float64)
self.assertEqual(xs['l4'].T['ItemA'].dtype, np.object_)
def test_xs(self):
l1 = self.panel4d.xs('l1', axis=0)
expected = self.panel4d['l1']
assert_panel_equal(l1, expected)
# view if possible
l1_view = self.panel4d.xs('l1', axis=0)
l1_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel4d['l1'].values).all())
# mixed-type
self.panel4d['strings'] = 'foo'
result = self.panel4d.xs('D', axis=3)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
panel4d = self.panel4d
labels = panel4d.labels[[1, 0]]
items = panel4d.items[[1, 0]]
dates = panel4d.major_axis[::2]
cols = ['D', 'C', 'F']
# all 4 specified
assert_panel4d_equal(panel4d.ix[labels, items, dates, cols],
panel4d.reindex(labels=labels, items=items,
major=dates, minor=cols))
# 3 specified
assert_panel4d_equal(panel4d.ix[:, items, dates, cols],
panel4d.reindex(items=items, major=dates,
minor=cols))
# 2 specified
assert_panel4d_equal(panel4d.ix[:, :, dates, cols],
panel4d.reindex(major=dates, minor=cols))
assert_panel4d_equal(panel4d.ix[:, items, :, cols],
panel4d.reindex(items=items, minor=cols))
assert_panel4d_equal(panel4d.ix[:, items, dates, :],
panel4d.reindex(items=items, major=dates))
# only 1
assert_panel4d_equal(panel4d.ix[:, items, :, :],
panel4d.reindex(items=items))
assert_panel4d_equal(panel4d.ix[:, :, dates, :],
panel4d.reindex(major=dates))
assert_panel4d_equal(panel4d.ix[:, :, :, cols],
panel4d.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
pass
def test_get_value(self):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
result = self.panel4d.get_value(
label, item, mjr, mnr)
expected = self.panel4d[label][item][mnr][mjr]
assert_almost_equal(result, expected)
def test_set_value(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
for label in self.panel4d.labels:
for item in self.panel4d.items:
for mjr in self.panel4d.major_axis[::2]:
for mnr in self.panel4d.minor_axis:
self.panel4d.set_value(label, item, mjr, mnr, 1.)
assert_almost_equal(
self.panel4d[label][item][mnr][mjr], 1.)
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
self.assertTrue(is_float_dtype(res3['l4'].values))
# resize
res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel4D)
self.assertIsNot(res, self.panel4d)
self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
self.assertTrue(is_float_dtype(res3['l4'].values))
class TestPanel4d(tm.TestCase, CheckIndexing, SafeForSparse,
SafeForLongAndSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel4d_equal(cls, x, y):
assert_panel4d_equal(x, y)
def setUp(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.panel4d = tm.makePanel4D(nper=8)
add_nans(self.panel4d)
def test_constructor(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
panel4d = Panel4D(self.panel4d._data)
self.assertIs(panel4d._data, self.panel4d._data)
panel4d = Panel4D(self.panel4d._data, copy=True)
self.assertIsNot(panel4d._data, self.panel4d._data)
assert_panel4d_equal(panel4d, self.panel4d)
vals = self.panel4d.values
# no copy
panel4d = Panel4D(vals)
self.assertIs(panel4d.values, vals)
# copy
panel4d = Panel4D(vals, copy=True)
self.assertIsNot(panel4d.values, vals)
# GH #8285, test when scalar data is used to construct a Panel4D
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
panel4d = Panel4D(val, labels=range(2), items=range(
3), major_axis=range(4), minor_axis=range(5))
vals = np.empty((2, 3, 4, 5), dtype=dtype)
vals.fill(val)
expected = Panel4D(vals, dtype=dtype)
assert_panel4d_equal(panel4d, expected)
# test the case when dtype is passed
panel4d = Panel4D(1, labels=range(2), items=range(
3), major_axis=range(4), minor_axis=range(5), dtype='float32')
vals = np.empty((2, 3, 4, 5), dtype='float32')
vals.fill(1)
expected = Panel4D(vals, dtype='float32')
assert_panel4d_equal(panel4d, expected)
def test_constructor_cast(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
zero_filled = self.panel4d.fillna(0)
casted = Panel4D(zero_filled._data, dtype=int)
casted2 = Panel4D(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel4D(zero_filled._data, dtype=np.int32)
casted2 = Panel4D(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_consolidate(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertTrue(self.panel4d._data.is_consolidated())
self.panel4d['foo'] = 1.
self.assertFalse(self.panel4d._data.is_consolidated())
panel4d = self.panel4d.consolidate()
self.assertTrue(panel4d._data.is_consolidated())
def test_ctor_dict(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
l1 = self.panel4d['l1']
l2 = self.panel4d['l2']
d = {'A': l1, 'B': l2.ix[['ItemB'], :, :]}
panel4d = Panel4D(d)
assert_panel_equal(panel4d['A'], self.panel4d['l1'])
assert_frame_equal(panel4d.ix['B', 'ItemB', :, :],
self.panel4d.ix['l2', ['ItemB'], :, :]['ItemB'])
def test_constructor_dict_mixed(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
data = dict((k, v.values) for k, v in self.panel4d.iteritems())
result = Panel4D(data)
exp_major = Index(np.arange(len(self.panel4d.major_axis)))
self.assert_index_equal(result.major_axis, exp_major)
result = Panel4D(data,
labels=self.panel4d.labels,
items=self.panel4d.items,
major_axis=self.panel4d.major_axis,
minor_axis=self.panel4d.minor_axis)
assert_panel4d_equal(result, self.panel4d)
data['l2'] = self.panel4d['l2']
result = Panel4D(data)
assert_panel4d_equal(result, self.panel4d)
# corner, blow up
data['l2'] = data['l2']['ItemB']
self.assertRaises(Exception, Panel4D, data)
data['l2'] = self.panel4d['l2'].values[:, :, :-1]
self.assertRaises(Exception, Panel4D, data)
def test_constructor_resize(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
data = self.panel4d._data
labels = self.panel4d.labels[:-1]
items = self.panel4d.items[:-1]
major = self.panel4d.major_axis[:-1]
minor = self.panel4d.minor_axis[:-1]
result = Panel4D(data, labels=labels, items=items,
major_axis=major, minor_axis=minor)
expected = self.panel4d.reindex(
labels=labels, items=items, major=major, minor=minor)
assert_panel4d_equal(result, expected)
result = Panel4D(data, items=items, major_axis=major)
expected = self.panel4d.reindex(items=items, major=major)
assert_panel4d_equal(result, expected)
result = Panel4D(data, items=items)
expected = self.panel4d.reindex(items=items)
assert_panel4d_equal(result, expected)
result = Panel4D(data, minor_axis=minor)
expected = self.panel4d.reindex(minor=minor)
assert_panel4d_equal(result, expected)
def test_conform(self):
p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
conformed = self.panel4d.conform(p)
tm.assert_index_equal(conformed.items, self.panel4d.labels)
tm.assert_index_equal(conformed.major_axis, self.panel4d.major_axis)
tm.assert_index_equal(conformed.minor_axis, self.panel4d.minor_axis)
def test_reindex(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ref = self.panel4d['l2']
# labels
result = self.panel4d.reindex(labels=['l1', 'l2'])
assert_panel_equal(result['l2'], ref)
# items
result = self.panel4d.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['l2']['ItemB'], ref['ItemB'])
# major
new_major = list(self.panel4d.major_axis[:10])
result = self.panel4d.reindex(major=new_major)
assert_frame_equal(
result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major))
# raise exception put both major and major_axis
self.assertRaises(Exception, self.panel4d.reindex,
major_axis=new_major, major=new_major)
# minor
new_minor = list(self.panel4d.minor_axis[:2])
result = self.panel4d.reindex(minor=new_minor)
assert_frame_equal(
result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor))
result = self.panel4d.reindex(labels=self.panel4d.labels,
items=self.panel4d.items,
major=self.panel4d.major_axis,
minor=self.panel4d.minor_axis)
# don't necessarily copy
result = self.panel4d.reindex()
assert_panel4d_equal(result, self.panel4d)
self.assertFalse(result is self.panel4d)
# with filling
smaller_major = self.panel4d.major_axis[::5]
smaller = self.panel4d.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel4d.major_axis,
method='pad')
assert_panel_equal(larger.ix[:, :, self.panel4d.major_axis[1], :],
smaller.ix[:, :, smaller_major[0], :])
# don't necessarily copy
result = self.panel4d.reindex(
major=self.panel4d.major_axis, copy=False)
assert_panel4d_equal(result, self.panel4d)
self.assertTrue(result is self.panel4d)
def test_not_hashable(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
p4D_empty = Panel4D()
self.assertRaises(TypeError, hash, p4D_empty)
self.assertRaises(TypeError, hash, self.panel4d)
def test_reindex_like(self):
# reindex_like
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
items=self.panel4d.items[:-1],
major=self.panel4d.major_axis[:-1],
minor=self.panel4d.minor_axis[:-1])
smaller_like = self.panel4d.reindex_like(smaller)
assert_panel4d_equal(smaller, smaller_like)
def test_sort_index(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
import random
rlabels = list(self.panel4d.labels)
ritems = list(self.panel4d.items)
rmajor = list(self.panel4d.major_axis)
rminor = list(self.panel4d.minor_axis)
random.shuffle(rlabels)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel4d.reindex(labels=rlabels)
sorted_panel4d = random_order.sort_index(axis=0)
assert_panel4d_equal(sorted_panel4d, self.panel4d)
def test_fillna(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertFalse(np.isfinite(self.panel4d.values).all())
filled = self.panel4d.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
self.assertRaises(NotImplementedError,
self.panel4d.fillna, method='pad')
def test_swapaxes(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = self.panel4d.swapaxes('labels', 'items')
self.assertIs(result.items, self.panel4d.labels)
result = self.panel4d.swapaxes('labels', 'minor')
self.assertIs(result.labels, self.panel4d.minor_axis)
result = self.panel4d.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel4d.minor_axis)
result = self.panel4d.swapaxes('items', 'major')
self.assertIs(result.items, self.panel4d.major_axis)
result = self.panel4d.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel4d.minor_axis)
# this should also work
result = self.panel4d.swapaxes(0, 1)
self.assertIs(result.labels, self.panel4d.items)
# this works, but return a copy
result = self.panel4d.swapaxes('items', 'items')
assert_panel4d_equal(self.panel4d, result)
self.assertNotEqual(id(self.panel4d), id(result))
def test_update(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
p4d = Panel4D([[[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]]])
other = Panel4D([[[[3.6, 2., np.nan]],
[[np.nan, np.nan, 7]]]])
p4d.update(other)
expected = Panel4D([[[[3.6, 2, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]]])
assert_panel4d_equal(p4d, expected)
def test_dtypes(self):
result = self.panel4d.dtypes
expected = Series(np.dtype('float64'), index=self.panel4d.labels)
assert_series_equal(result, expected)
def test_repr_empty(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
empty = Panel4D()
repr(empty)
def test_rename(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
mapper = {'l1': 'foo',
'l2': 'bar',
'l3': 'baz'}
renamed = self.panel4d.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assert_index_equal(renamed.labels, exp)
renamed = self.panel4d.rename_axis(str.lower, axis=3)
exp = Index(['a', 'b', 'c', 'd'])
self.assert_index_equal(renamed.minor_axis, exp)
# don't copy
renamed_nocopy = self.panel4d.rename_axis(mapper,
axis=0,
copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel4d['l1'].values == 3).all())
def test_get_attr(self):
assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
CIFASIS/pylearn2 | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
        `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
    Scales image down to fit inside shape; preserves the proportions of image.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
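# --- Illustrative sketch (not part of the original module) ------------------
# A hedged example of how fit_inside, letterbox and
# make_letterboxed_thumbnail compose: a 64x128 image is shrunk to fit a
# 32x32 box, then padded back up with black bars. The array contents and
# the target shape are arbitrary example values, and the Python 2 / PIL
# environment that pylearn2 targets is assumed.
def _example_letterboxed_thumbnail():
    """Return a (32, 32, 3) thumbnail built from a random 64x128 image."""
    rng = np.random.RandomState(0)
    image = rng.uniform(size=(64, 128, 3)).astype('float32')
    thumb = make_letterboxed_thumbnail(image, (32, 32))
    # fit_inside preserves proportions (the image becomes roughly 16x32),
    # and letterbox pads the rows back to 32 with zeros.
    assert thumb.shape == (32, 32, 3)
    return thumb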
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
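# --- Illustrative sketch (not part of the original module) ------------------
# A hedged round-trip through save() and load(), assuming PIL is installed.
# The temporary file and the 8x8 random image are example values only.
def _example_save_load_roundtrip():
    """Save a random RGB image to a temporary PNG and read it back."""
    rng = np.random.RandomState(0)
    image = rng.uniform(size=(8, 8, 3))
    fd, name = mkstemp(suffix='.png')
    os.close(fd)
    try:
        save(name, image)
        # load() rescales pixel values to [0, 1] by default, matching the
        # float input that was saved.
        loaded = load(name)
        assert loaded.shape == (8, 8, 3)
    finally:
        os.remove(name)
    return loaded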
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
        2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
        in which every row is a flattened image.
    img_shape : 2-tuple of ints
        The first component is the height of each image,
        the second component is the width.
    tile_shape : 2-tuple of ints
        The number of images to tile in (row, columns) form.
    tile_spacing : 2-tuple of ints
        The number of pixels of padding between tiles, in (row, column) form.
    scale_rows_to_unit_interval : bool
        Whether or not the values need to be scaled to the interval [0, 1]
        before being plotted.
    output_pixel_vals : bool
        Whether or not the output should be pixel values (uint8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
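# --- Illustrative sketch (not part of the original module) ------------------
# tile_raster_images() expects one flattened image per row. The example
# below tiles 6 random 8x8 "filters" into a 2x3 grid with 1-pixel spacing;
# all sizes are arbitrary example values.
def _example_tile_random_filters():
    """Return a uint8 array showing 6 random 8x8 tiles in a 2x3 grid."""
    rng = np.random.RandomState(0)
    filters = rng.randn(6, 64)  # 6 flattened 8x8 images
    tiled = tile_raster_images(filters,
                               img_shape=(8, 8),
                               tile_shape=(2, 3),
                               tile_spacing=(1, 1))
    # out_shape is (8 + 1) * 2 - 1 = 17 rows by (8 + 1) * 3 - 1 = 26 columns.
    assert tiled.shape == (17, 26)
    assert tiled.dtype == np.uint8
    return tiled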
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
jpinsonault/android_sensor_logger | python_scripts/cluster_light.py | 1 | 2894 | import numpy as np
import argparse
from pprint import pprint
from sklearn import mixture
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn import decomposition
from LogEntry import LogEntry
from LogEntry import db
from datetime import datetime
from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange
from numpy import arange
args = None
format_string = '%H:%M:%S %m/%d/%Y'
def parse_args():
global args
parser = argparse.ArgumentParser()
args = parser.parse_args()
def main():
g = mixture.GMM(n_components=4)
log_entries = load_light()
light_data = [min(row.light_reading, 120) for row in log_entries]
timestamps = [datetime.strptime(row.timestamp, format_string) for row in log_entries]
g.fit(light_data)
predictions = predict(g, light_data)
light_dict = {}
inside = bin_by_hour(timestamps, predictions, [0,1])
outside = bin_by_hour(timestamps, predictions, [2,3])
pprint(inside)
pprint(outside)
def plot_light_data(timestamps, predictions):
fig, ax = plt.subplots()
ax.plot_date(timestamps, predictions, 'b')
ax.xaxis.set_minor_locator(HourLocator(arange(0,25,6)))
ax.xaxis.set_minor_formatter(DateFormatter('%H'))
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%a'))
ax.fmt_xdata = DateFormatter('%H:%M:%S')
fig.autofmt_xdate()
plt.show()
def bin_by_hour(timestamps, predictions, clusters):
filtered = [timestamps[index] for index, entry in enumerate(timestamps) if predictions[index] in clusters]
buckets = {hour: 0 for hour in range(24)}
for time in filtered:
hour = time.hour
buckets[hour] = buckets.get(hour, 0) + 1
return buckets
def predict(gmm, data):
results = gmm.predict(data)
smoothed = smooth_results(results)
converter = make_converter(gmm, smoothed)
return [converter[value] for value in smoothed]
def load_light():
light_data = LogEntry.select()
return sorted(light_data, key=lambda row: datetime.strptime(row.timestamp, format_string))
def smooth_results(data):
new_data = []
for index in range(len(data)):
new_data.append(get_most_common(data, index))
return new_data
def make_converter(gmm, data):
    """Map each GMM component index to its rank when sorted by component mean."""
converter = {}
means = [[index, value[0]] for index, value in enumerate(gmm.means_)]
    for index, mean in enumerate(sorted(means, key=lambda pair: pair[1])):
converter[mean[0]] = index
return converter
def get_most_common(data, index):
    """Return the most frequent value in a +/-100 sample window around index."""
window_size = 100
start = max(index - window_size, 0)
end = min(index + window_size, len(data))
buckets = {}
for value in data[start:end]:
buckets[value] = buckets.get(value, 0) + 1
return max(buckets.iterkeys(), key=(lambda key: buckets[key]))
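# --- Illustrative sketch (not part of the original script) ------------------
# Shows the intent of make_converter() on a tiny synthetic example: GMM
# component indices are arbitrary, so they are remapped in order of
# increasing component mean (the darkest cluster becomes 0). The readings
# below are made-up example values, not real sensor data, and the old
# sklearn mixture.GMM API used elsewhere in this script is assumed.
def example_relabel_synthetic():
    g = mixture.GMM(n_components=2)
    readings = [5, 6, 4, 100, 110, 105, 5, 102]
    g.fit(readings)
    labels = g.predict(readings)
    converter = make_converter(g, labels)
    # Low readings now map to cluster 0 and bright readings to cluster 1,
    # regardless of the order in which GMM happened to number them.
    return [converter[label] for label in labels]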
if __name__ == '__main__':
main() | mit |
sgenoud/scikit-learn | sklearn/mixture/gmm.py | 1 | 28544 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import numpy as np
import warnings
from ..base import BaseEstimator
from ..utils import check_random_state, deprecated
from ..utils.extmath import logsumexp
from .. import cluster
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
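# --- Illustrative sketch (not part of the original module) ------------------
# A hedged example of evaluating the diagonal-covariance log-density of two
# points under two 2-D Gaussians; all numbers are arbitrary example values.
def _example_log_density():
    X = np.array([[0., 0.],
                  [1., 1.]])
    means = np.array([[0., 0.],
                      [5., 5.]])
    covars = np.array([[1., 1.],
                       [2., 2.]])
    # Shape (2 samples, 2 components): entry [i, k] is the log-density of
    # X[i] under the Gaussian with mean means[k] and covariance diag(covars[k]).
    return log_multivariate_normal_density(X, means, covars, 'diag')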
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
    covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
from scipy import linalg
U, s, V = linalg.svd(covar)
sqrtS = np.diag(np.sqrt(s))
sqrt_covar = np.dot(U, np.dot(sqrtS, V))
rand = np.dot(sqrt_covar, rand)
return (rand.T + mean).T
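# --- Illustrative sketch (not part of the original module) ------------------
# Drawing a few samples from a single diagonal-covariance Gaussian; the
# mean, variances and sample count are arbitrary example values.
def _example_sample_gaussian():
    mean = np.array([0., 10.])
    covar = np.array([1., 4.])  # per-dimension variances for 'diag'
    samples = sample_gaussian(mean, covar, covariance_type='diag',
                              n_samples=5, random_state=0)
    # Note the (n_features, n_samples) return shape documented above.
    assert samples.shape == (2, 5)
    return samples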
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
`weights_` : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`covars_` : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
    DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=0.01)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on `covariance_type`::
            (`n_components`, `n_features`)                 if 'spherical',
            (`n_features`, `n_features`)                   if 'tied',
            (`n_components`, `n_features`)                 if 'diag',
            (`n_components`, `n_features`, `n_features`)   if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def eval(self, X):
"""Evaluate the model on data
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob: array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('the shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(
X, self.means_, self.covars_, self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
@deprecated("""will be removed in v0.12;
use the score or predict method instead, depending on the question""")
def decode(self, X):
"""Find most likely mixture components for each point in X.
DEPRECATED IN VERSION 0.10; WILL BE REMOVED IN VERSION 0.12
use the score or predict method instead, depending on the question.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprobs : array_like, shape (n_samples,)
Log probability of each point in `obs` under the model.
components : array_like, shape (n_samples,)
            Index of the most likely mixture component for each observation
"""
logprob, posteriors = self.eval(X)
return logprob, posteriors.argmax(axis=1)
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.eval(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.eval(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.eval(X)
return responsibilities
@deprecated("""will be removed in v0.12;
        use the sample method instead""")
def rvs(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
DEPRECATED IN VERSION 0.11; WILL BE REMOVED IN VERSION 0.12
use sample instead
"""
return self.sample(n_samples, random_state)
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in xrange(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X, **kwargs):
"""Estimate model parameters with the expectation-maximization
algorithm.
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
## initialization step
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
if kwargs:
warnings.warn("Setting parameters in the 'fit' method is"
"deprecated. Set it on initialization instead.",
DeprecationWarning)
            # initialisations in case the user still passes parameters to fit
# so things don't break
if 'n_iter' in kwargs:
self.n_iter = kwargs['n_iter']
if 'n_init' in kwargs:
if kwargs['n_init'] < 1:
raise ValueError('GMM estimation requires at least one run')
else:
self.n_init = kwargs['n_init']
if 'params' in kwargs:
self.params = kwargs['params']
if 'init_params' in kwargs:
self.init_params = kwargs['init_params']
max_log_prob = - np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_ = False
for i in xrange(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.eval(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if log_likelihood[-1] > max_log_prob:
max_log_prob = log_likelihood[-1]
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (- 2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
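# A minimal sketch of how the bic/aic methods above trade off fit against
# complexity; the log-likelihood, parameter count and sample size are made-up
# values for illustration only.
def _example_information_criteria(log_likelihood=-1200.0, n_params=25,
                                  n_samples=500):
    # BIC penalises each free parameter by log(n_samples), AIC by a flat 2.
    bic = -2 * log_likelihood + n_params * np.log(n_samples)
    aic = -2 * log_likelihood + 2 * n_params
    return bic, aic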
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
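# A small self-check sketch for the vectorised diagonal log-density above: it
# should agree with the direct per-component Gaussian formula.  The random
# shapes below are arbitrary.
def _example_check_diag_log_density():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    means = rng.rand(2, 3)
    covars = rng.rand(2, 3) + 0.5
    direct = np.array([-0.5 * (3 * np.log(2 * np.pi)
                               + np.sum(np.log(cv))
                               + np.sum((X - mu) ** 2 / cv, axis=1))
                       for mu, cv in zip(means, covars)]).T
    return np.allclose(
        direct, _log_multivariate_normal_density_diag(X, means, covars))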
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
from scipy import linalg
n_samples, n_dim = X.shape
icv = linalg.pinv(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
+ np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(X, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
import itertools
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + \
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape"
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in xrange(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
ElDeveloper/qiime | qiime/colors.py | 15 | 24391 | #!/usr/bin/env python
# file colors.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
# remember to add yourself
__credits__ = ["Rob Knight", "Jesse Stombaugh", "Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
"""Code for coloring series based on prefs file.
"""
from colorsys import rgb_to_hsv, hsv_to_rgb
from parse import parse_mapping_file, group_by_field, parse_taxa_summary_table
from numpy import array
from math import floor
import os
import re
from qiime.util import MissingFileError
from qiime.sort import natsort
def string_to_rgb(s):
"""Converts hex string to RGB"""
orig_s = s
s = s.strip()
if s.startswith('#'):
s = s[1:]
if not len(s) == 6:
raise ValueError("String %s doesn't look like a hex string" % orig_s)
return int(s[:2], 16), int(s[2:4], 16), int(s[4:], 16)
def rgb_tuple_to_hsv(rgb):
"""Converts rgb tuple to hsv on Mage's scale"""
rgb_0_to_1 = array(rgb) / 255.0
hsv = rgb_to_hsv(*tuple(rgb_0_to_1))
return hsv[0] * 360, hsv[1] * 100, hsv[2] * 100
def mage_hsv_tuple_to_rgb(hsv):
"""Converts hsv tuple on Mage scale to rgb on 0-255 scale"""
hsv_0_to_1 = hsv[0] / 360.0, hsv[1] / 100.0, hsv[2] / 100.0
rgb = hsv_to_rgb(*tuple(hsv_0_to_1))
return int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)
class Color(object):
"""Stores a color object: name, HSV, ability to write as HTML or Mage.
Note: the reason we store as HSV, not RGB, is that you frequently want
to do gradient colors by hue going from e.g. white to blue, white to red,
etc. Unfortunately, in RGB, you can't specify _which_ white you have
in e.g. #FFFFFF, whereas to get the right gradient you need to be able
to specify that you want (0,0,100) or (180,0,100) or whatever. Hence
the colorspace gymnastics.
"""
def __init__(self, name, coords, colorspace='rgb'):
"""Returns new Color object. Init with name and coords as (R,G,B).
Can also initialize with coords as (H,S,V) or #aabbcc format.
"""
self.Name = name
if isinstance(coords, str): # assume is hex format
self.Coords = rgb_tuple_to_hsv(string_to_rgb(coords))
elif colorspace == 'rgb':
self.Coords = rgb_tuple_to_hsv(tuple(coords))
elif colorspace == 'hsv':
self.Coords = tuple(coords)
else:
raise ValueError(
"Unknown colorspace %s: valid values are rgb, hsv" %
colorspace)
def toRGB(self):
"""Returns self as r, g, b tuple."""
return mage_hsv_tuple_to_rgb(self.Coords)
def toMage(self):
"""Returns self as Mage/KiNG-format string"""
h, s, v = self.Coords
return '@hsvcolor {%s} %3.1f %3.1f %3.1f' % (self.Name, h, s, v)
def toHex(self):
"""Returns self as hex string."""
rgb = self.toRGB()
return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],
hex(rgb[2])[2:])).replace(' ', '0')
def toInt(self):
"""Returns self as hex string."""
rgb = self.toHex()[1:]
return int(float.fromhex(rgb))
def __str__(self):
"""Return string representation of self"""
return str(self.Name) + ':' + self.toHex()
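# A brief usage sketch for the Color class above; the colour value is an
# arbitrary example, not taken from any QIIME prefs file.
def _example_color_roundtrip():
    red = Color('red', (255, 0, 0))    # initialised from an RGB tuple
    assert red.toHex() == '#ff0000'    # rendered as a hex string
    assert red.toRGB() == (255, 0, 0)  # and back to RGB
    return red.toMage()                # Mage/KiNG-format string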
def color_dict_to_objects(d, colorspace='hsv'):
"""Converts color dict to dict of Color objects"""
result = {}
for k, v in d.items():
result[k] = Color(k, v, colorspace)
return result
# Note: these are all in Mage HSV colorspace
'''
These are the old colors
data_color_hsv = {
'aqua': (180, 100, 100),
'blue': (240,100,100),
'fuchsia': (300,100,100),
'gray': (300,0,50.2),
'green': (120,100,50.2),
'lime': (120,100,100),
'maroon': (0,100,50.2),
'olive': (60,100,50.2),
'purple': (300,100,50.2),
'red': (0,100,100),
'silver': (0, 0, 75.3),
'teal': (180,100,50.2),
'yellow': (60,100,100)
}
This is the old order
data_color_order = ['blue','lime','red','aqua','fuchsia','yellow','green', \
'maroon','teal','purple','olive','silver','gray']
'''
data_color_hsv = {
#'black1': (0,0,20),
'red1': (0, 100, 100),
'blue1': (240, 100, 100),
'orange1': (28, 98, 95),
'green1': (120, 100, 50.2),
'purple1': (302, 73, 57),
'yellow1': (60, 100, 100),
'cyan1': (184, 49, 96),
'pink1': (333, 37, 96),
'teal1': (178, 42, 63),
'brown1': (36, 89, 42),
'gray1': (0, 0, 50.2),
'lime': (123, 99, 96),
'red2': (14, 51, 97),
'blue2': (211, 42, 85),
'orange2': (32, 46, 99),
'green2': (142, 36, 79),
'purple2': (269, 29, 75),
'yellow2': (56, 40, 100),
#'black2': (303,100,24),
'gray2': (0, 0, 75.3),
#'teal2': (192,100,24),
'red3': (325, 100, 93),
'blue3': (197, 100, 100),
#'purple3': (271,43,36),
'brown2': (33, 45, 77),
'green3': (60, 100, 50.2),
'purple4': (264, 75, 100),
#'yellow3': (60,66,75),
#'blue4': (213,45,77),
'red4': (348, 31, 74),
'teal3': (180, 100, 50.2),
#'brown3': (60,100,28),
'red5': (0, 100, 50.2),
'green4': (81, 100, 26),
#'purple5': (240,100,41),
'orange3': (26, 100, 65)
#'brown4': (25,100,20),
#'red6': (17,100,63),
#'purple6':(272,100,44)
}
data_color_order = ['red1', 'blue1', 'orange1', 'green1', 'purple1', 'yellow1',
'cyan1', 'pink1', 'teal1', 'brown1', 'gray1', 'lime', 'red2', 'blue2',
'orange2', 'green2', 'purple2', 'yellow2', 'gray2', 'red3',
'blue3', 'brown2', 'green3', 'purple4',
'red4', 'teal3', 'red5', 'green4', 'orange3']
data_colors = color_dict_to_objects(data_color_hsv)
kinemage_colors = [
'hotpink',
'blue',
'lime',
'gold',
'red',
'sea',
'purple',
'green']
def iter_color_groups(mapping, prefs):
"""Iterates over color groups for each category given mapping file/prefs.
See get_group_colors for details of algorithm.
"""
# Iterate through prefs and color by given mapping labels
for key in natsort(prefs.keys()):
col_name = prefs[key]['column']
if 'colors' in prefs[key]:
if isinstance(prefs[key]['colors'], dict):
colors = prefs[key]['colors'].copy() # copy so we can mutate
else:
colors = prefs[key]['colors'][:]
else:
colors = {}
labelname = prefs[key]['column']
# Define groups and associate appropriate colors to each group
groups = group_by_field(mapping, col_name)
colors, data_colors, data_color_order = \
get_group_colors(groups, colors)
yield labelname, groups, colors, data_colors, data_color_order
def get_group_colors(groups, colors, data_colors=data_colors,
data_color_order=data_color_order):
"""Figures out group colors for a specific series based on prefs.
Algorithm is as follows:
- For each name, color pair we know about:
- Check if the name is one of the groups (exact match)
- If it isn't, assume it's a prefix and pull out all the matching groups
- If the color is just a string, set everything to the color with that
name
- Otherwise, assume that either it's a new color we're adding, or that
it's a range for gradient coloring.
- If it's a new color, create it and add it to added_data_colors.
- If it's a gradient, make up all the new colors and add them to
added_data_colors
The current method for gradient coloring of columns (should perhaps
replace with more general method) is to pass in any of the following:
'colors':(('white', (0,0,100)),('red',(0,100,100)))
makes gradient between white and red, applies to all samples
'colors':{'RK':(('white',(0,0,100)),('red',(0,100,100))),
'NF':(('white',(120,0,100)),('green',(120,100,100)))
}
pulls the combination samples starting with RK, colors with
first gradient, then pulls the combination samples starting
with NF, colors with the next gradient.
Return values are:
- colors: dict of {group_value:color_name}
- data_colors: dict of {color_name:color_object}
- data_color_order: order in which the data colors are used/written.
"""
added_data_colors = {}
if isinstance(colors, dict):
# assume we're getting some of the colors out of a dict
if colors.items() != []:
for k, v in sorted(colors.items()):
if k not in groups: # assume is prefix
k_matches = [g for g in groups if g.startswith(k)]
if isinstance(v, str): # just set everything to this color
for m in k_matches:
colors[m] = v
else: # assume is new color or range
first, second = v
if isinstance(first, str): # new named color?
if first not in data_colors:
added_data_colors[first] = Color(first, second)
for m in k_matches:
colors[m] = first
else: # new color range?
                            start_color, end_color = map(get_color,
                                                         [first, second])
                            start_hsv = start_color.Coords
                            end_hsv = end_color.Coords
                            num_colors = len(k_matches)
curr_data_colors = color_dict_to_objects(
make_color_dict(start_color,
start_hsv, end_color, end_hsv, num_colors))
curr_colors = {}
color_groups(k_matches, curr_colors,
natsort(curr_data_colors))
colors.update(curr_colors)
added_data_colors.update(curr_data_colors)
del colors[k]
elif not isinstance(v, str): # assume val is new color
color = get_color(v)
if color.Name not in data_colors:
added_data_colors[color.Name] = color
colors[k] = color.Name
# handle any leftover groups
color_groups(groups, colors, data_color_order)
# add new colors
data_colors.update(added_data_colors)
if added_data_colors != {}:
data_color_order.append(''.join(natsort(added_data_colors)))
else:
# handle case where no prefs is used
color_groups(groups, colors, data_color_order)
else:
# handle the case where colors is a tuple for gradients
start_color, end_color = map(get_color, colors)
start_hsv = start_color.Coords
end_hsv = end_color.Coords
num_colors = len(groups)
data_colors = color_dict_to_objects(
make_color_dict(start_color, start_hsv, end_color,
end_hsv, num_colors))
data_color_order = list(natsort(data_colors.keys()))
colors = {}
color_groups(groups, colors, data_color_order)
return colors, data_colors, data_color_order
def get_color(color, data_colors=data_colors):
"""Gets a color by looking up its name or initializing with name+data"""
if isinstance(color, str):
if color in data_colors:
return data_colors[color]
else:
raise ValueError("Color name %s in prefs not recognized" % color)
else:
name, coords = color
if isinstance(coords, str):
colorspace = 'rgb'
else:
colorspace = 'hsv'
return Color(name, coords, colorspace)
def color_groups(groups, colors, data_color_order):
"""Colors a set of groups in data_color_order, handling special colors.
Modifies colors in-place.
Cycles through data colors (i.e. wraps around when last color is reached).
"""
group_num = -1
for g in natsort(groups):
if g not in colors:
group_num += 1
if group_num == len(data_color_order):
group_num = 0
colors[g] = data_color_order[group_num]
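# A minimal sketch of the cycling behaviour of color_groups; the group and
# colour names below are invented for illustration.
def _example_color_groups_cycling():
    colors = {}
    color_groups(['g1', 'g2', 'g3'], colors, ['red1', 'blue1'])
    # With only two colours available the third group wraps back to 'red1'.
    return colors  # {'g1': 'red1', 'g2': 'blue1', 'g3': 'red1'}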
def make_color_dict(start_name, start_hsv, end_name, end_hsv, n):
"""Makes dict of color gradient"""
colors = linear_gradient(start_hsv, end_hsv, n)
names = ['%sto%s%s_%s' % (start_name, end_name, n, i) for i in range(n)]
return dict(zip(names, colors))
def combine_map_label_cols(combinecolorby, mapping):
"""Merge two or more mapping columns into one column"""
combinedmapdata = array([''] * len(mapping), dtype='a100')
title = []
match = False
for p in range(len(combinecolorby)):
for i in range(len(mapping[0])):
if str(combinecolorby[p]) == str(mapping[0][i]):
match = True
for q in range(len(mapping)):
combinedmapdata[q] = combinedmapdata[q] + mapping[q][i]
break
else:
match = False
if not match:
raise ValueError(
'One of the columns you tried to combine does not exist!')
title.append(combinecolorby[p])
combinedmapdata[0] = '&&'.join(title)
for i in range(len(combinedmapdata)):
mapping[i].append(combinedmapdata[i])
return mapping
def process_colorby(colorby, data, color_prefs=None):
"""Parses the colorby option from the command line.
color_prefs is required if colorby is not passed.
"""
match = False
prefs = {}
mapping = data['map']
colorbydata = []
if colorby is None and color_prefs is None:
        # if colorby option or prefs file not given, color by all categories
# in mapping file
colorbydata = mapping[0]
elif colorby and color_prefs:
# if both the colorby option and prefs file are given, use the categories
# from the colorby option with their appropriate colors in the prefs
# file
prefs_colorby = [color_prefs[i]['column'] for i in color_prefs]
cmd_colorby = colorby.strip().strip("'").split(',')
for i in range(len(cmd_colorby)):
for j in range(len(prefs_colorby)):
if cmd_colorby[i] == prefs_colorby[j]:
colorbydata.append(prefs_colorby[j])
match = True
break
else:
match = False
if not match:
colorbydata.append(cmd_colorby[i])
names = list(colorbydata)
elif colorby:
# if only the colorby option is passed
colorbydata = colorby.strip().strip("'").split(',')
else:
# if only the prefs file is passed
colorbydata = [color_prefs[i]['column'] for i in color_prefs]
names = list(color_prefs)
match = False
for j, col in enumerate(colorbydata):
key = str(col)
# transfer over old color data if it was present
if '&&' in col:
# Create an array using multiple columns from mapping file
combinecolorby = col.split('&&')
data['map'] = combine_map_label_cols(combinecolorby, mapping)
prefs[key] = {}
prefs[key]['column'] = '&&'.join(combinecolorby)
else:
# Color by only one column in mapping file
prefs[key] = {}
prefs[key]['column'] = col
if color_prefs:
for p in color_prefs:
if 'column' in color_prefs[p] and color_prefs[p]['column'] == col:
if 'colors' in color_prefs[p]:
prefs[key]['colors'] = color_prefs[p]['colors']
else:
prefs[key]['colors'] = (
('white', (0, 0, 100)), ('red', (0, 100, 100)))
match = True
break
else:
match = False
if not match:
prefs[key] = {}
prefs[key]['column'] = col
prefs[key]['colors'] = (
('white', (0, 0, 100)), ('red', (0, 100, 100)))
return prefs, data
def linear_gradient(start, end, nbins, eps=1e-10):
"""Makes linear color gradient from start to end, using nbins.
Returns list of (x, y, z) tuples in current colorspace.
eps is used to prevent the case where start and end are the same.
"""
start = array(start)
end = array(end)
result = []
n_minus_1 = max(float(nbins - 1), eps)
for i in range(nbins):
result.append(
list((start * (n_minus_1 - i) / n_minus_1) + (end * (i / n_minus_1))))
return result
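# A short sketch of linear_gradient: three bins between white and red in the
# Mage HSV space used throughout this module (the endpoints are illustrative).
def _example_linear_gradient():
    white_hsv, red_hsv = (0, 0, 100), (0, 100, 100)
    return linear_gradient(white_hsv, red_hsv, 3)
    # -> [[0.0, 0.0, 100.0], [0.0, 50.0, 100.0], [0.0, 100.0, 100.0]]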
# The following functions were not unit_tested, however the parts within
# the functions are unit_tested
def get_map(options, data):
"""Opens and returns mapping data"""
try:
map_f = open(options.map_fname, 'U').readlines()
except (TypeError, IOError):
raise MissingFileError('Mapping file required for this analysis')
data['map'] = parse_mapping_file(map_f)
return data['map']
def map_from_coords(coords):
"""Makes pseudo mapping file from coords.
set data['map'] to result of this if coords file supplied but not map.
TODO: write equivalent function for other inputs, e.g. for rarefaction --
basic principle is that you need data structure that you can extract list
of sample ids from.
"""
    result = [['SampleID', 'Sample']]
    for i in range(len(coords[0])):
        result.append([coords[0][i], 'Sample'])
    return result
def sample_color_prefs_and_map_data_from_options(options):
"""Returns color prefs and mapping data based on options.
Note: opens files as needed. Only returns the info related to metadata
coloring and category maps. If you need additional info, it is necessary
to get that info explicitly (e.g. coord files, rarefaction files, etc.).
For example, you might modify the data dict afterwards to add coords,
rarefaction info, etc. depending on the application.
"""
data = {}
# Open and get mapping data, if none supplied create a pseudo mapping \
# file
mapping, headers, comments = get_map(options, data)
new_mapping = []
new_mapping.append(headers)
for i in range(len(mapping)):
new_mapping.append(mapping[i])
data['map'] = new_mapping
# need to set some other way from sample ids
# Determine which mapping headers to color by, if none given, color by \
# Sample ID's
try:
colorby = options.colorby
except AttributeError:
colorby = None
if options.prefs_path:
prefs = eval(open(options.prefs_path, 'U').read())
color_prefs, data = process_colorby(colorby, data,
prefs['sample_coloring'])
if 'background_color' in prefs:
background_color = prefs['background_color']
else:
background_color = 'black'
if 'ball_scale' in prefs:
ball_scale = prefs['ball_scale']
else:
ball_scale = 1.0
arrow_colors = {}
if 'arrow_line_color' in prefs:
arrow_colors['line_color'] = prefs['arrow_line_color']
else:
arrow_colors['line_color'] = 'white'
if 'arrow_head_color' in prefs:
arrow_colors['head_color'] = prefs['arrow_head_color']
else:
arrow_colors['head_color'] = 'red'
else:
background_color = 'black'
color_prefs, data = process_colorby(colorby, data, None)
ball_scale = 1.0
arrow_colors = {'line_color': 'white', 'head_color': 'red'}
if options.prefs_path and options.background_color:
background_color = options.background_color
elif options.background_color:
background_color = options.background_color
if background_color == 'black':
label_color = 'white'
else:
label_color = 'black'
return (
color_prefs, data, background_color, label_color, ball_scale, arrow_colors
)
def taxonomy_color_prefs_and_map_data_from_options(options):
"""Returns color prefs and counts data based on options.
counts data is any file in a format that can be parsed by parse_otu_table
"""
data = {}
data['counts'] = {}
taxonomy_levels = []
# need to set some other way from sample ids
# Determine which mapping headers to color by, if none given, color by \
# Sample ID's
taxonomy_count_files = options.counts_fname
for f in taxonomy_count_files:
try:
counts_f = open(f, 'U').readlines()
except (TypeError, IOError):
raise MissingFileError('Counts file required for this analysis')
sample_ids, otu_ids, otu_table = \
parse_taxa_summary_table(counts_f)
data['counts'][f] = (sample_ids, otu_ids, otu_table)
level = max([len(t.split(';')) - 1 for t in otu_ids])
taxonomy_levels.append(str(level))
if options.prefs_path:
prefs = eval(open(options.prefs_path, 'U').read())
color_prefs = taxonomy_process_prefs(taxonomy_levels,
prefs['taxonomy_coloring'])
if 'background_color' in prefs:
background_color = prefs['background_color']
else:
background_color = 'black'
else:
background_color = 'black'
color_prefs = taxonomy_process_prefs(taxonomy_levels, None)
if options.prefs_path and options.background_color:
background_color = options.background_color
elif options.background_color:
background_color = options.background_color
if background_color == 'black':
label_color = 'white'
else:
label_color = 'black'
return color_prefs, data, background_color, label_color
def taxonomy_process_prefs(taxonomy_levels, color_prefs=None):
"""Creates taxonomy prefs dict given specific taxonomy levels.
color_prefs is not required
taxonomy_levels is a list of the level number i.e. Phylum is 2
prefs will include a 'colors' dictionary for each given level
    if there is a corresponding level in color_prefs, that is the
    dictionary for the level; otherwise it adds an empty dict
"""
prefs = {}
for j, col in enumerate(taxonomy_levels):
key = str(col)
col = str(col)
# Color by only one level
prefs[key] = {}
        prefs[key]['column'] = col
        match = False
        if color_prefs:
for p in color_prefs:
if 'column' in color_prefs[p] and str(color_prefs[p]['column']) == col:
if 'colors' in color_prefs[p]:
prefs[key]['colors'] = color_prefs[p]['colors'].copy()
else:
prefs[key]['colors'] = {}
match = True
break
else:
match = False
if not match:
prefs[key] = {}
prefs[key]['column'] = col
prefs[key]['colors'] = {}
return prefs
def get_qiime_hex_string_color(index):
"""Retrieve an HEX color from the list of QIIME colors
Input:
index: index of the color to retrieve, if the number is greater than the
number of available colors, it will rollover in the list.
Output:
color: string in the format #FF0000
"""
assert index >= 0, "There are no negative indices for the QIIME colors"
n_colors = len(data_color_order)
if index >= n_colors:
index = int(index - floor((index / n_colors) * n_colors))
return data_colors[data_color_order[index]].toHex()
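# A small sketch of the rollover behaviour described above: with the colours
# defined in data_color_order, an index equal to their count wraps to index 0.
def _example_qiime_color_rollover():
    first = get_qiime_hex_string_color(0)
    wrapped = get_qiime_hex_string_color(len(data_color_order))
    return first == wrapped  # True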
def matplotlib_rgb_color(rgb_color):
"""Returns RGB color in matplotlib format.
ex: (255,0,255) will return (1.0,0.0,1.0)
"""
return tuple([i / 255. for i in rgb_color])
| gpl-2.0 |
hdmetor/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
matsaleh13/courses | deeplearning1/nbs/lesson3.0.py | 1 | 9648 | # lesson2.py
# Stand-alone script to run the code from the lesson2-matsaleh.ipynb Jupyter Notebook.
'''
Lesson 3 Assignment Plan:
1. Start with Vgg16 model with binary output and weights from lesson2.5.py.
2. Create an overfitted model:
a. Split conv and FC layers into two separate models.
b. Precalculate FC layer inputs from conv layer output.
c. Remove dropout from the FC model.
d. Fit the FC model to the data.
e. Save the weights.
3. Add data augmentation to the training set:
a. Combine the Conv (locked) and FC models.
b. Compile and train the combined model.
c. Save the weights.
4. Add batchnorm to the combined model:
a. Create a standalone model from the Vgg16bn model's BN layers.
b. Fit the BN model to the data.
c. Save the weights.
d. Create another BN model and combine it with the conv model into a final model.
e. Set the BN layer weights from the first BN model (why not just combine *that* BN model with the conv model)?
f. Save the weights.
5. Fit the final model:
a. Incrementally, saving the weights along the way.
lesson3.0.py:
- Based on lesson2.5.py
- now with functions
'''
import os
import os.path
import click
import utils
from vgg16 import Vgg16
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers import Input
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop, Nadam
from keras.preprocessing import image
#
# Utility Functions
#
def onehot(x):
# Returns two-column matrix with one row for each class.
return np.array(OneHotEncoder().fit_transform(x.reshape(-1, 1)).todense())
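# A quick sketch of what onehot() produces for a two-class label vector; the
# labels below are dummy values.
def _example_onehot():
    labels = np.array([0, 1, 1, 0])
    return onehot(labels)
    # -> [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]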
#
# Constants
#
INPUT_PATH = None
OUTPUT_PATH = None
TRAIN_PATH = None
VALID_PATH = None
TEST_PATH = None
MODEL_PATH = None
RESULTS_PATH = None
BATCH_SIZE = None
NUM_EPOCHS = None
#
# Data Setup
#
def setup_folders():
click.echo()
click.echo('Setting up folders...')
click.echo()
click.echo('Input folder: %s' % INPUT_PATH)
global TRAIN_PATH
TRAIN_PATH = os.path.join(INPUT_PATH, 'train')
click.echo('Training data: %s' % TRAIN_PATH)
global VALID_PATH
VALID_PATH = os.path.join(INPUT_PATH, 'valid')
click.echo('Validation data: %s' % VALID_PATH)
global TEST_PATH
TEST_PATH = os.path.join(INPUT_PATH, 'test')
click.echo('Test data: %s' % TEST_PATH)
click.echo()
click.echo('Output folder: %s' % OUTPUT_PATH)
global MODEL_PATH
MODEL_PATH = os.path.join(OUTPUT_PATH, 'models')
if not os.path.exists(MODEL_PATH): os.makedirs(MODEL_PATH)
click.echo('Model data: %s' % MODEL_PATH)
global RESULTS_PATH
RESULTS_PATH = os.path.join(OUTPUT_PATH, 'results')
if not os.path.exists(RESULTS_PATH): os.makedirs(RESULTS_PATH)
click.echo('Results: %s' % RESULTS_PATH)
click.echo()
def load_data():
#
# NOTE: Loading and use of data structures is pretty fucked up here.
# Some things require getting data from generators, others require NumPy arrays.
# In the end we use both, and sometimes re-load the data from disk and/or re-transform
# it more than once.
#
click.echo('Loading raw training data from %s...' % TRAIN_PATH)
global TRAIN_BATCHES
TRAIN_BATCHES = utils.get_batches(TRAIN_PATH, shuffle=False, batch_size=1)
click.echo('Loading array from generator...')
global TRAIN_ARRAY
TRAIN_ARRAY = utils.get_data(TRAIN_PATH)
click.echo('\tshape: %s' % (TRAIN_ARRAY.shape,))
click.echo()
# TRAIN_DATA = os.path.join(MODEL_PATH, 'train_data.bc')
# click.echo('Saving processed training data to %s...' % TRAIN_DATA)
# utils.save_array(TRAIN_DATA, TRAIN_ARRAY)
click.echo('Loading raw validation data from %s...' % VALID_PATH)
global VALID_BATCHES
VALID_BATCHES = utils.get_batches(VALID_PATH, shuffle=False, batch_size=1)
click.echo('Loading array from generator...')
global VALID_ARRAY
VALID_ARRAY = utils.get_data(VALID_PATH)
click.echo('\tshape: %s' % (VALID_ARRAY.shape,))
click.echo()
def get_true_labels():
click.echo('Getting the true labels for every image...')
global TRAIN_CLASSES
TRAIN_CLASSES = TRAIN_BATCHES.classes
global TRAIN_LABELS
TRAIN_LABELS = onehot(TRAIN_CLASSES)
# click.echo('\tTraining labels look like this: \n%s\n...\n%s' % (TRAIN_LABELS[:5], TRAIN_LABELS[-5:]))
# click.echo()
global VALID_CLASSES
VALID_CLASSES = VALID_BATCHES.classes
global VALID_LABELS
VALID_LABELS = onehot(VALID_CLASSES)
# click.echo('\tValidation labels look like this: \n%s\n...\n%s' % (VALID_LABELS[:5], VALID_LABELS[-5:]))
# click.echo()
def prepare_generators():
click.echo('Preparing image data generators...')
gen = image.ImageDataGenerator()
# NOTE: Why do we overwrite these generators using the arrays?
# TRAIN_BATCHES and VALID_BATCHES here are generators,
# but still not quite the same as above.
global TRAIN_BATCHES
TRAIN_BATCHES = gen.flow(TRAIN_ARRAY, TRAIN_LABELS,
batch_size=BATCH_SIZE, shuffle=True)
global VALID_BATCHES
VALID_BATCHES = gen.flow(VALID_ARRAY, VALID_LABELS,
batch_size=BATCH_SIZE, shuffle=False)
def create_model():
vgg = Vgg16()
vgg.model.pop()
click.echo('Replacing last layer of model...')
for layer in vgg.model.layers: layer.trainable=False
vgg.model.add(Dense(2, activation='softmax'))
# OPTIMIZER = Nadam()
OPTIMIZER = RMSprop(lr=0.001)
vgg.model.compile(optimizer=OPTIMIZER, loss='categorical_crossentropy', metrics=['accuracy'])
return vgg, OPTIMIZER
def fit_model(model, opt):
# First epoch higher LR
LR=0.01
K.set_value(opt.lr, LR)
click.echo('Fitting last layer of model using LR=%s' % LR)
model.fit_generator(TRAIN_BATCHES, samples_per_epoch=TRAIN_BATCHES.n, nb_epoch=NUM_EPOCHS,
validation_data=VALID_BATCHES, nb_val_samples=VALID_BATCHES.n)
# Next batch, lower again
LR=0.001
K.set_value(opt.lr, LR)
click.echo('Fitting last layer of model using LR=%s' % LR)
model.fit_generator(TRAIN_BATCHES, samples_per_epoch=TRAIN_BATCHES.n, nb_epoch=NUM_EPOCHS,
validation_data=VALID_BATCHES, nb_val_samples=VALID_BATCHES.n)
click.echo('Saving model weights...')
model.save_weights(os.path.join(MODEL_PATH, 'finetune_1_ll.h5'))
def eval_model(model):
click.echo('Evaluating model with validation data...')
TEST_LOSS = model.evaluate(VALID_ARRAY, VALID_LABELS)
click.echo('TEST_LOSS: %s' % (TEST_LOSS,))
click.echo('Confusion matrix after last layer retraining')
PREDS = model.predict_classes(VALID_ARRAY, batch_size=BATCH_SIZE)
PROBS = model.predict_proba(VALID_ARRAY, batch_size=BATCH_SIZE)[:, 0]
CM = confusion_matrix(VALID_CLASSES, PREDS)
click.echo(CM)
def predict(model):
click.echo('Predicting labels for test data set...')
TEST_BATCHES = utils.get_batches(TEST_PATH, shuffle=False, batch_size=BATCH_SIZE)
TEST_PREDS = model.predict_generator(TEST_BATCHES, TEST_BATCHES.nb_sample)
TEST_FILENAMES = TEST_BATCHES.filenames
#Save our test results arrays so we can use them again later
# click.echo('Saving raw prediction results.')
# utils.save_array(os.path.join(MODEL_PATH, 'test_preds.dat'), TEST_PREDS)
# utils.save_array(os.path.join(MODEL_PATH, 'filenames.dat'), TEST_FILENAMES)
# Grab the dog prediction column
IS_DOG = TEST_PREDS[:, 1]
# To play it safe, we use a sneaky trick to round down our edge predictions
# Swap all ones with .95 and all zeros with .05
IS_DOG = IS_DOG.clip(min=0.05, max=0.95)
# Extract imageIds from the filenames in our test/unknown directory
IDS = np.array([int(os.path.splitext(os.path.basename(f))[0])
for f in TEST_FILENAMES])
# Combine the ids and IS_DOG columns into a single 2-column array.
SUBMIT = np.stack([IDS, IS_DOG], axis=1)
click.echo('Formatting and saving data for Kaggle submission.')
np.savetxt(os.path.join(RESULTS_PATH, 'kaggle_submission.csv'), SUBMIT,
fmt='%d,%.5f', header='id,label', comments='')
click.echo('Model training and prediction complete.')
@click.command()
@click.option('--sample', is_flag=True, default=True, help='Use sample dataset for training.')
@click.option('--sample-set', default='sample', help='Sample dataset to train on.')
@click.option('--local', default=True, help='Local environment (vs. FloydHub)')
def main(sample, sample_set, local):
global BATCH_SIZE
global NUM_EPOCHS
global INPUT_PATH
global OUTPUT_PATH
if local:
BATCH_SIZE = 32
else:
BATCH_SIZE = 64
INPUT_PATH = os.path.join('.', 'input')
OUTPUT_PATH = os.path.join('.', 'output')
if sample:
INPUT_PATH = os.path.join(INPUT_PATH, sample_set)
OUTPUT_PATH = os.path.join(OUTPUT_PATH, sample_set)
NUM_EPOCHS = 4
else:
NUM_EPOCHS = 10
setup_folders()
load_data()
get_true_labels()
prepare_generators()
vgg, opt = create_model()
fit_model(vgg.model, opt)
eval_model(vgg.model)
predict(vgg.model)
if __name__ == '__main__':
main()
| apache-2.0 |
r-mart/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
    (the relative variance scales of the components) but can sometimes
    improve the predictive accuracy of the downstream estimators by
    making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
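# A minimal sketch of choosing n_components by explained variance, as
# described in the class docstring; the random data below are illustrative.
def _example_pca_variance_threshold():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = PCA(n_components=0.95).fit(X)
    # n_components_ is the smallest k whose cumulative explained-variance
    # ratio exceeds 0.95.
    return pca.n_components_, pca.explained_variance_ratio_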
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
    (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
gchrupala/visually-grounded-speech | imaginet/simple_data.py | 2 | 7498 | import numpy
import cPickle as pickle
import gzip
import os
import copy
import funktional.util as util
from funktional.util import autoassign
from sklearn.preprocessing import StandardScaler
import string
import random
# Types of tokenization
def words(sentence):
return sentence['tokens']
def characters(sentence):
return list(sentence['raw'])
def compressed(sentence):
return [ c.lower() for c in sentence['raw'] if c in string.letters ]
def phonemes(sentence):
return [ pho for pho in sentence['ipa'] if pho != "*" ]
class NoScaler():
def __init__(self):
pass
def fit_transform(self, x):
return x
def transform(self, x):
return x
def inverse_transform(self, x):
return x
class InputScaler():
def __init__(self):
self.scaler = StandardScaler()
def fit_transform(self, data):
flat = numpy.vstack(data)
self.scaler.fit(flat)
return [ self.scaler.transform(X) for X in data ]
def transform(self, data):
return [ self.scaler.transform(X) for X in data ]
def inverse_transform(self, data):
return [ self.scaler.inverse_transform(X) for X in data ]
def vector_padder(vecs):
"""Pads each vector in vecs with zeros at the beginning. Returns 3D tensor with dimensions:
(BATCH_SIZE, SAMPLE_LENGTH, NUMBER_FEATURES).
"""
max_len = max(map(len, vecs))
return numpy.array([ numpy.vstack([numpy.zeros((max_len-len(vec),vec.shape[1])) , vec])
for vec in vecs ], dtype='float32')
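# A small sketch of vector_padder: two utterances of unequal length are
# left-padded with zeros into one (batch, time, features) tensor.  The shapes
# below are made up.
def _example_vector_padder():
    short = numpy.ones((2, 3))
    longer = numpy.ones((4, 3))
    batch = vector_padder([short, longer])
    return batch.shape  # (2, 4, 3); batch[0, :2] is all zeros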
class Batcher(object):
def __init__(self, mapper, pad_end=False):
autoassign(locals())
self.BEG = self.mapper.BEG_ID
self.END = self.mapper.END_ID
def pad(self, xss): # PAD AT BEGINNING
max_len = max((len(xs) for xs in xss))
def pad_one(xs):
if self.pad_end:
return xs + [ self.END for _ in range(0,(max_len-len(xs))) ]
return [ self.BEG for _ in range(0,(max_len-len(xs))) ] + xs
return [ pad_one(xs) for xs in xss ]
def batch_inp(self, sents):
mb = self.padder(sents)
return mb[:,1:]
def padder(self, sents):
return numpy.array(self.pad([[self.BEG]+sent+[self.END] for sent in sents]), dtype='int32')
def batch(self, gr):
"""Prepare minibatch.
Returns:
- input string
- visual target vector
- output string at t-1
- target string
"""
mb_inp = self.padder([x['tokens_in'] for x in gr])
mb_target_t = self.padder([x['tokens_out'] for x in gr])
inp = mb_inp[:,1:]
target_t = mb_target_t[:,1:]
target_prev_t = mb_target_t[:,0:-1]
target_v = numpy.array([ x['img'] for x in gr ], dtype='float32')
        # Check the last item explicitly (equivalent to the Python 2 behaviour of the
        # leaked comprehension variable); items are assumed to all have audio or none.
        audio = vector_padder([ x['audio'] for x in gr ]) if gr[-1]['audio'] is not None else None
return { 'input': inp,
'target_v':target_v,
'target_prev_t':target_prev_t,
'target_t':target_t,
'audio': audio }
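# Illustrative sketch (added for clarity; not part of the original module). The
# mapper below is a made-up stand-in with arbitrary BEG_ID/END_ID values; it
# only shows how Batcher.padder wraps sentences in BEG/END markers and
# left-pads them with BEG to a common length (funktional must be installed,
# as for the rest of this module).
def _example_batcher_padding():
    class _FakeMapper(object):
        BEG_ID = 0
        END_ID = 1
    batcher = Batcher(_FakeMapper(), pad_end=False)
    padded = batcher.padder([[5, 6], [7]])
    assert padded.tolist() == [[0, 5, 6, 1],
                               [0, 0, 7, 1]]
    return padded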
class SimpleData(object):
"""Training / validation data prepared to feed to the model."""
def __init__(self, provider, tokenize=words, min_df=10, scale=True, scale_input=False, batch_size=64, shuffle=False, limit=None, curriculum=False, val_vocab=False):
autoassign(locals())
self.data = {}
self.mapper = util.IdMapper(min_df=self.min_df)
self.scaler = StandardScaler() if scale else NoScaler()
self.audio_scaler = InputScaler() if scale_input else NoScaler()
parts = insideout(self.shuffled(arrange(provider.iterImages(split='train'),
tokenize=self.tokenize,
limit=limit)))
parts_val = insideout(self.shuffled(arrange(provider.iterImages(split='val'), tokenize=self.tokenize)))
# TRAINING
if self.val_vocab:
_ = list(self.mapper.fit_transform(parts['tokens_in'] + parts_val['tokens_in']))
parts['tokens_in'] = self.mapper.transform(parts['tokens_in']) # FIXME UGLY HACK
else:
parts['tokens_in'] = self.mapper.fit_transform(parts['tokens_in'])
parts['tokens_out'] = self.mapper.transform(parts['tokens_out'])
parts['img'] = self.scaler.fit_transform(parts['img'])
parts['audio'] = self.audio_scaler.fit_transform(parts['audio'])
self.data['train'] = outsidein(parts)
# VALIDATION
parts_val['tokens_in'] = self.mapper.transform(parts_val['tokens_in'])
parts_val['tokens_out'] = self.mapper.transform(parts_val['tokens_out'])
parts_val['img'] = self.scaler.transform(parts_val['img'])
parts_val['audio'] = self.audio_scaler.transform(parts_val['audio'])
self.data['valid'] = outsidein(parts_val)
self.batcher = Batcher(self.mapper, pad_end=False)
def shuffled(self, xs):
if not self.shuffle:
return xs
else:
zs = copy.copy(list(xs))
random.shuffle(zs)
return zs
def iter_train_batches(self):
# sort data by length
if self.curriculum:
data = [self.data['train'][i] for i in numpy.argsort([len(x['tokens_in']) for x in self.data['train']])]
else:
data = self.data['train']
for bunch in util.grouper(data, self.batch_size*20):
bunch_sort = [ bunch[i] for i in numpy.argsort([len(x['tokens_in']) for x in bunch]) ]
for item in util.grouper(bunch_sort, self.batch_size):
yield self.batcher.batch(item)
def iter_valid_batches(self):
for bunch in util.grouper(self.data['valid'], self.batch_size*20):
bunch_sort = [ bunch[i] for i in numpy.argsort([len(x['tokens_in']) for x in bunch]) ]
for item in util.grouper(bunch_sort, self.batch_size):
yield self.batcher.batch(item)
def dump(self, model_path):
"""Write scaler and batcher to disc."""
pickle.dump(self.scaler, gzip.open(os.path.join(model_path, 'scaler.pkl.gz'), 'w'),
protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.batcher, gzip.open(os.path.join(model_path, 'batcher.pkl.gz'), 'w'),
protocol=pickle.HIGHEST_PROTOCOL)
def arrange(data, tokenize=words, limit=None):
for i,image in enumerate(data):
if limit is not None and i > limit:
break
for sent in image['sentences']:
toks = tokenize(sent)
yield {'tokens_in': toks,
'tokens_out': toks,
'audio': sent.get('audio'),
'img': image['feat']}
def insideout(ds):
"""Transform a list of dictionaries to a dictionary of lists."""
ds = list(ds)
result = dict([(k, []) for k in ds[0].keys()])
for d in ds:
for k,v in d.items():
result[k].append(v)
return result
def outsidein(d):
"""Transform a dictionary of lists to a list of dictionaries."""
ds = []
keys = d.keys()
for key in keys:
d[key] = list(d[key])
for i in range(len(d.values()[0])):
ds.append(dict([(k, d[k][i]) for k in keys]))
return ds
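# Illustrative sketch (added for clarity; not part of the original module): the
# two helpers above are inverses of each other on inputs like this one.
def _example_insideout_roundtrip():
    records = [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]
    columns = insideout(records)
    assert columns == {'a': [1, 2], 'b': ['x', 'y']}
    assert outsidein(columns) == records
    return columns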
| mit |
LaurenLuoYun/losslessh264 | plot_prior_misses.py | 40 | 1124 | # Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: map(lambda c: c == '0', open('/tmp/' + p).read()) for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
| bsd-2-clause |
meghana1995/sympy | sympy/physics/quantum/tests/test_circuitplot.py | 93 | 2065 | from sympy.physics.quantum.circuitplot import labeller, render_label, Mz, CreateOneQubitGate,\
CreateCGate
from sympy.physics.quantum.gate import CNOT, H, SWAP, CGate, S, T
from sympy.external import import_module
from sympy.utilities.pytest import skip
mpl = import_module('matplotlib')
def test_render_label():
assert render_label('q0') == r'$|q0\rangle$'
assert render_label('q0', {'q0': '0'}) == r'$|q0\rangle=|0\rangle$'
def test_Mz():
assert str(Mz(0)) == 'Mz(0)'
def test_create1():
Qgate = CreateOneQubitGate('Q')
assert str(Qgate(0)) == 'Q(0)'
def test_createc():
Qgate = CreateCGate('Q')
assert str(Qgate([1],0)) == 'C((1),Q(0))'
def test_labeller():
"""Test the labeller utility"""
assert labeller(2) == ['q_1', 'q_0']
assert labeller(3,'j') == ['j_2', 'j_1', 'j_0']
def test_cnot():
"""Test a simple cnot circuit. Right now this only makes sure the code doesn't
raise an exception, and some simple properties
"""
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
c = CircuitPlot(CNOT(1,0),2)
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == []
def test_ex1():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0)*H(1),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
def test_ex4():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(SWAP(0,2)*H(0)* CGate((0,),S(1)) *H(1)*CGate((0,),T(2))\
*CGate((1,),S(2))*H(2),3,labels=labeller(3,'j'))
assert c.ngates == 7
assert c.nqubits == 3
assert c.labels == ['j_2', 'j_1', 'j_0']
| bsd-3-clause |
yidawang/brainiak | examples/eventseg/simulated_data.py | 7 | 3607 | """Example of finding event segmentations on simulated data
This code generates simulated datasets that have temporally-clustered
structure (with the same series of latent event patterns). An event
segmentation is learned on the first dataset, and then we try to find the same
series of events in other datasets. We measure how well we find the latent
boundaries and the log-likelihood of the fits, and compare to a null model
in which the event order is randomly shuffled.
"""
import brainiak.eventseg.event
import numpy as np
from scipy import stats
import logging
import matplotlib.pyplot as plt
logging.basicConfig(level=logging.DEBUG)
def generate_event_labels(T, K, length_std):
event_labels = np.zeros(T, dtype=int)
start_TR = 0
for e in range(K - 1):
length = round(
((T - start_TR) / (K - e)) * (1 + length_std * np.random.randn()))
length = min(max(length, 1), T - start_TR - (K - e))
event_labels[start_TR:(start_TR + length)] = e
start_TR = start_TR + length
event_labels[start_TR:] = K - 1
return event_labels
def generate_data(V, T, event_labels, event_means, noise_std):
simul_data = np.empty((V, T))
for t in range(T):
simul_data[:, t] = stats.multivariate_normal.rvs(
event_means[:, event_labels[t]], cov=noise_std, size=1)
simul_data = stats.zscore(simul_data, axis=1, ddof=1)
return simul_data
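# Illustrative sketch (added for clarity; not part of the original example).
# The parameter values below are arbitrary and only show the shapes produced
# by the two generators above.
def _example_generators():
    labels = generate_event_labels(T=50, K=5, length_std=0.1)
    assert len(labels) == 50
    assert labels[0] == 0 and labels[-1] == 4  # labels run monotonically from 0 to K-1
    data = generate_data(V=4, T=50, event_labels=labels,
                         event_means=np.random.randn(4, 5), noise_std=0.5)
    assert data.shape == (4, 50)  # one z-scored timeseries per voxel
    return labels, data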
# Parameters for creating small simulated datasets
V = 10
K = 10
T = 500
T2 = 300
# Generate the first dataset
np.random.seed(1)
event_means = np.random.randn(V, K)
event_labels = generate_event_labels(T, K, 0.1)
simul_data = generate_data(V, T, event_labels, event_means, 1)
# Find the events in this dataset
simul_seg = brainiak.eventseg.event.EventSegment(K)
simul_seg.fit(simul_data.T)
# Generate other datasets with the same underlying sequence of event
# patterns, and try to find matching events
test_loops = 10
bound_match = np.empty((2, test_loops))
LL = np.empty((2, test_loops))
for test_i in range(test_loops):
# Generate data
event_labels2 = generate_event_labels(T2, K, 0.5)
simul_data2 = generate_data(V, T2, event_labels2, event_means, 0.1)
# Find events matching previously-learned events
gamma, LL[0, test_i] = simul_seg.find_events(simul_data2.T)
est_events2 = np.argmax(gamma, axis=1)
bound_match[0, test_i] = 1 - np.sum(abs(np.diff(event_labels2) -
np.diff(est_events2))) / (2 * K)
# Run again, but with the order of events shuffled so that it no longer
# corresponds to the training data
gamma, LL[1, test_i] = simul_seg.find_events(simul_data2.T, scramble=True)
est_events2 = np.argmax(gamma, axis=1)
bound_match[1, test_i] = 1 - np.sum(abs(np.diff(event_labels2) -
np.diff(est_events2))) / (2 * K)
# Across the testing datasets, print how well we identify the true event
# boundaries and the log-likelihoods in real vs. shuffled data
print("Boundary match: {:.2} (null: {:.2})".format(
np.mean(bound_match[0, :]), np.mean(bound_match[1, :])))
print("Log-likelihood: {:.3} (null: {:.3})".format(
np.mean(LL[0, :]), np.mean(LL[1, :])))
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(simul_data2, interpolation='nearest', cmap=plt.cm.bone,
aspect='auto')
plt.xlabel('Timepoints')
plt.ylabel('Voxels')
plt.subplot(2, 1, 2)
gamma, LL[0, test_i] = simul_seg.find_events(simul_data2.T)
est_events2 = np.argmax(gamma, axis=1)
plt.plot(est_events2)
plt.xlabel('Timepoints')
plt.ylabel('Event label')
plt.show()
| apache-2.0 |
JPFrancoia/scikit-learn | examples/model_selection/plot_learning_curve.py | 76 | 4509 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, this shape of curve is seen very
often with more complex datasets: the training score is very high at the
beginning and decreases, while the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
pradyu1993/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 4 | 1741 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
Multi-output regression with :ref:`decision trees <tree>`: the decision tree
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print __doc__
import numpy as np
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
from sklearn.tree import DecisionTreeRegressor
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
import pylab as pl
pl.figure()
pl.scatter(y[:, 0], y[:, 1], c="k", label="data")
pl.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
pl.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
pl.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
pl.xlim([-6, 6])
pl.ylim([-6, 6])
pl.xlabel("data")
pl.ylabel("target")
pl.title("Multi-output Decision Tree Regression")
pl.legend()
pl.show()
| bsd-3-clause |
dhermes/gcloud-python | bigquery/google/cloud/bigquery/table.py | 2 | 48877 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Tables."""
from __future__ import absolute_import
import copy
import datetime
import operator
import warnings
import six
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
from google.api_core.page_iterator import HTTPIterator
import google.cloud._helpers
from google.cloud.bigquery import _helpers
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.schema import _build_schema_resource
from google.cloud.bigquery.schema import _parse_schema_resource
from google.cloud.bigquery.external_config import ExternalConfig
_NO_PANDAS_ERROR = (
"The pandas library is not installed, please install "
"pandas to use the to_dataframe() function."
)
_TABLE_HAS_NO_SCHEMA = 'Table has no schema: call "client.get_table()"'
_MARKER = object()
def _reference_getter(table):
"""A :class:`~google.cloud.bigquery.table.TableReference` pointing to
this table.
Returns:
google.cloud.bigquery.table.TableReference: pointer to this table.
"""
from google.cloud.bigquery import dataset
dataset_ref = dataset.DatasetReference(table.project, table.dataset_id)
return TableReference(dataset_ref, table.table_id)
def _view_use_legacy_sql_getter(table):
"""bool: Specifies whether to execute the view with Legacy or Standard SQL.
This boolean specifies whether to execute the view with Legacy SQL
(:data:`True`) or Standard SQL (:data:`False`). The client side default is
:data:`False`. The server-side default is :data:`True`. If this table is
not a view, :data:`None` is returned.
Raises:
ValueError: For invalid value types.
"""
view = table._properties.get("view")
if view is not None:
# The server-side default for useLegacySql is True.
return view.get("useLegacySql", True)
    # In some cases, such as in a table list, no view object is present, but the
# resource still represents a view. Use the type as a fallback.
if table.table_type == "VIEW":
# The server-side default for useLegacySql is True.
return True
class EncryptionConfiguration(object):
"""Custom encryption configuration (e.g., Cloud KMS keys).
Args:
kms_key_name (str): resource ID of Cloud KMS key used for encryption
"""
def __init__(self, kms_key_name=None):
self._properties = {}
if kms_key_name is not None:
self._properties["kmsKeyName"] = kms_key_name
@property
def kms_key_name(self):
"""str: Resource ID of Cloud KMS key
Resource ID of Cloud KMS key or :data:`None` if using default
encryption.
"""
return self._properties.get("kmsKeyName")
@kms_key_name.setter
def kms_key_name(self, value):
self._properties["kmsKeyName"] = value
@classmethod
def from_api_repr(cls, resource):
"""Construct an encryption configuration from its API representation
Args:
resource (Dict[str, object]):
An encryption configuration representation as returned from
the API.
Returns:
google.cloud.bigquery.table.EncryptionConfiguration:
An encryption configuration parsed from ``resource``.
"""
config = cls()
config._properties = copy.deepcopy(resource)
return config
def to_api_repr(self):
"""Construct the API resource representation of this encryption
configuration.
Returns:
Dict[str, object]:
Encryption configuration as represented as an API resource
"""
return copy.deepcopy(self._properties)
def __eq__(self, other):
if not isinstance(other, EncryptionConfiguration):
return NotImplemented
return self.kms_key_name == other.kms_key_name
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.kms_key_name)
def __repr__(self):
return "EncryptionConfiguration({})".format(self.kms_key_name)
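# Illustrative sketch (added for clarity; not part of the original module). The
# KMS key name below is hypothetical; the snippet only shows the round trip
# between EncryptionConfiguration and its API representation.
def _example_encryption_configuration():
    key = "projects/p/locations/l/keyRings/r/cryptoKeys/k"  # hypothetical resource ID
    config = EncryptionConfiguration(kms_key_name=key)
    resource = config.to_api_repr()
    assert resource == {"kmsKeyName": key}
    assert EncryptionConfiguration.from_api_repr(resource) == config
    return config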
class TableReference(object):
"""TableReferences are pointers to tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables
Args:
dataset_ref (google.cloud.bigquery.dataset.DatasetReference):
A pointer to the dataset
table_id (str): The ID of the table
"""
def __init__(self, dataset_ref, table_id):
self._project = dataset_ref.project
self._dataset_id = dataset_ref.dataset_id
self._table_id = table_id
@property
def project(self):
"""str: Project bound to the table"""
return self._project
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return self._dataset_id
@property
def table_id(self):
"""str: The table ID."""
return self._table_id
@property
def path(self):
"""str: URL path for the table's APIs."""
return "/projects/%s/datasets/%s/tables/%s" % (
self._project,
self._dataset_id,
self._table_id,
)
@classmethod
def from_string(cls, table_id, default_project=None):
"""Construct a table reference from table ID string.
Args:
table_id (str):
A table ID in standard SQL format. If ``default_project``
                is not specified, this must include a project ID, dataset
ID, and table ID, each separated by ``.``.
default_project (str):
Optional. The project ID to use when ``table_id`` does not
include a project ID.
Returns:
TableReference: Table reference parsed from ``table_id``.
Examples:
>>> TableReference.from_string('my-project.mydataset.mytable')
TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')
Raises:
ValueError:
If ``table_id`` is not a fully-qualified table ID in
standard SQL format.
"""
from google.cloud.bigquery.dataset import DatasetReference
output_project_id = default_project
output_dataset_id = None
output_table_id = None
parts = table_id.split(".")
if len(parts) < 2:
raise ValueError(
"table_id must be a fully-qualified table ID in "
'standard SQL format. e.g. "project.dataset.table", got '
"{}".format(table_id)
)
elif len(parts) == 2:
if not default_project:
raise ValueError(
"When default_project is not set, table_id must be a "
"fully-qualified table ID in standard SQL format. "
'e.g. "project.dataset_id.table_id", got {}'.format(table_id)
)
output_dataset_id, output_table_id = parts
elif len(parts) == 3:
output_project_id, output_dataset_id, output_table_id = parts
if len(parts) > 3:
raise ValueError(
"Too many parts in table_id. Must be a fully-qualified table "
'ID in standard SQL format. e.g. "project.dataset.table", '
"got {}".format(table_id)
)
return cls(
DatasetReference(output_project_id, output_dataset_id), output_table_id
)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``.
"""
from google.cloud.bigquery.dataset import DatasetReference
project = resource["projectId"]
dataset_id = resource["datasetId"]
table_id = resource["tableId"]
return cls(DatasetReference(project, dataset_id), table_id)
def to_api_repr(self):
"""Construct the API resource representation of this table reference.
Returns:
Dict[str, object]: Table reference represented as an API resource
"""
return {
"projectId": self._project,
"datasetId": self._dataset_id,
"tableId": self._table_id,
}
def _key(self):
"""A tuple key that uniquely describes this field.
Used to compute this instance's hashcode and evaluate equality.
Returns:
Tuple[str]: The contents of this :class:`DatasetReference`.
"""
return (self._project, self._dataset_id, self._table_id)
def __eq__(self, other):
if not isinstance(other, TableReference):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._key())
def __repr__(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset_ref = DatasetReference(self._project, self._dataset_id)
return "TableReference({}, '{}')".format(repr(dataset_ref), self._table_id)
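# Illustrative sketch (added for clarity; not part of the original module). The
# project, dataset, and table names are hypothetical; the snippet only shows
# parsing a standard SQL table ID and serializing it back to an API resource.
def _example_table_reference():
    ref = TableReference.from_string("my-project.mydataset.mytable")
    assert ref.path == "/projects/my-project/datasets/mydataset/tables/mytable"
    assert ref.to_api_repr() == {
        "projectId": "my-project",
        "datasetId": "mydataset",
        "tableId": "mytable",
    }
    return ref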
class Table(object):
"""Tables represent a set of rows whose values correspond to a schema.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables
Args:
table_ref (google.cloud.bigquery.table.TableReference):
A pointer to a table
schema (List[google.cloud.bigquery.schema.SchemaField]):
The table's schema
"""
_PROPERTY_TO_API_FIELD = {
"friendly_name": "friendlyName",
"expires": "expirationTime",
"time_partitioning": "timePartitioning",
"partitioning_type": "timePartitioning",
"partition_expiration": "timePartitioning",
"view_use_legacy_sql": "view",
"view_query": "view",
"external_data_configuration": "externalDataConfiguration",
"encryption_configuration": "encryptionConfiguration",
}
def __init__(self, table_ref, schema=None):
self._properties = {"tableReference": table_ref.to_api_repr(), "labels": {}}
# Let the @property do validation.
if schema is not None:
self.schema = schema
@property
def project(self):
"""str: Project bound to the table."""
return self._properties["tableReference"]["projectId"]
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return self._properties["tableReference"]["datasetId"]
@property
def table_id(self):
"""str: ID of the table."""
return self._properties["tableReference"]["tableId"]
reference = property(_reference_getter)
@property
def path(self):
"""str: URL path for the table's APIs."""
return "/projects/%s/datasets/%s/tables/%s" % (
self.project,
self.dataset_id,
self.table_id,
)
@property
def schema(self):
"""List[google.cloud.bigquery.schema.SchemaField]: Table's schema.
Raises:
TypeError: If 'value' is not a sequence
ValueError:
If any item in the sequence is not a
:class:`~google.cloud.bigquery.schema.SchemaField`
"""
prop = self._properties.get("schema")
if not prop:
return []
else:
return _parse_schema_resource(prop)
@schema.setter
def schema(self, value):
if value is None:
self._properties["schema"] = None
elif not all(isinstance(field, SchemaField) for field in value):
raise ValueError("Schema items must be fields")
else:
self._properties["schema"] = {"fields": _build_schema_resource(value)}
@property
def labels(self):
"""Dict[str, str]: Labels for the table.
        This property always returns a dict. To change a table's labels,
modify the dict, then call ``Client.update_table``. To delete a
label, set its value to :data:`None` before updating.
Raises:
ValueError: If ``value`` type is invalid.
"""
return self._properties.setdefault("labels", {})
@labels.setter
def labels(self, value):
if not isinstance(value, dict):
raise ValueError("Pass a dict")
self._properties["labels"] = value
@property
def encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See `protecting data with Cloud KMS keys
<https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_
in the BigQuery documentation.
"""
prop = self._properties.get("encryptionConfiguration")
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@encryption_configuration.setter
def encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._properties["encryptionConfiguration"] = api_repr
@property
def created(self):
"""Union[datetime.datetime, None]: Datetime at which the table was
created (:data:`None` until set from the server).
"""
creation_time = self._properties.get("creationTime")
if creation_time is not None:
# creation_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(creation_time)
)
@property
def etag(self):
"""Union[str, None]: ETag for the table resource (:data:`None` until
set from the server).
"""
return self._properties.get("etag")
@property
def modified(self):
"""Union[datetime.datetime, None]: Datetime at which the table was last
modified (:data:`None` until set from the server).
"""
modified_time = self._properties.get("lastModifiedTime")
if modified_time is not None:
# modified_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(modified_time)
)
@property
def num_bytes(self):
"""Union[int, None]: The size of the table in bytes (:data:`None` until
set from the server).
"""
return _helpers._int_or_none(self._properties.get("numBytes"))
@property
def num_rows(self):
"""Union[int, None]: The number of rows in the table (:data:`None`
until set from the server).
"""
return _helpers._int_or_none(self._properties.get("numRows"))
@property
def self_link(self):
"""Union[str, None]: URL for the table resource (:data:`None` until set
from the server).
"""
return self._properties.get("selfLink")
@property
def full_table_id(self):
"""Union[str, None]: ID for the table (:data:`None` until set from the
server).
In the format ``project_id:dataset_id.table_id``.
"""
return self._properties.get("id")
@property
def table_type(self):
"""Union[str, None]: The type of the table (:data:`None` until set from
the server).
Possible values are ``'TABLE'``, ``'VIEW'``, or ``'EXTERNAL'``.
"""
return self._properties.get("type")
@property
def time_partitioning(self):
"""google.cloud.bigquery.table.TimePartitioning: Configures time-based
partitioning for a table.
Raises:
ValueError:
If the value is not :class:`TimePartitioning` or :data:`None`.
"""
prop = self._properties.get("timePartitioning")
if prop is not None:
return TimePartitioning.from_api_repr(prop)
@time_partitioning.setter
def time_partitioning(self, value):
api_repr = value
if isinstance(value, TimePartitioning):
api_repr = value.to_api_repr()
elif value is not None:
raise ValueError(
"value must be google.cloud.bigquery.table.TimePartitioning " "or None"
)
self._properties["timePartitioning"] = api_repr
@property
def partitioning_type(self):
"""Union[str, None]: Time partitioning of the table if it is
partitioned (Defaults to :data:`None`).
The only partitioning type that is currently supported is
:attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.type_ instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.type_
@partitioning_type.setter
def partitioning_type(self, value):
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.type_ instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is None:
self._properties["timePartitioning"] = {}
self._properties["timePartitioning"]["type"] = value
@property
def partition_expiration(self):
"""Union[int, None]: Expiration time in milliseconds for a partition.
If :attr:`partition_expiration` is set and :attr:`type_` is
not set, :attr:`type_` will default to
:attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.expiration_ms instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.expiration_ms
@partition_expiration.setter
def partition_expiration(self, value):
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.expiration_ms instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is None:
self._properties["timePartitioning"] = {"type": TimePartitioningType.DAY}
self._properties["timePartitioning"]["expirationMs"] = str(value)
@property
def clustering_fields(self):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
Clustering fields are immutable after table creation.
.. note::
As of 2018-06-29, clustering fields cannot be set on a table
            which does not also have time partitioning defined.
"""
prop = self._properties.get("clustering")
if prop is not None:
return list(prop.get("fields", ()))
@clustering_fields.setter
def clustering_fields(self, value):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
"""
if value is not None:
prop = self._properties.setdefault("clustering", {})
prop["fields"] = value
else:
if "clustering" in self._properties:
del self._properties["clustering"]
@property
def description(self):
"""Union[str, None]: Description of the table (defaults to
:data:`None`).
Raises:
ValueError: For invalid value types.
"""
return self._properties.get("description")
@description.setter
def description(self, value):
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["description"] = value
@property
def expires(self):
"""Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
Raises:
ValueError: For invalid value types.
"""
expiration_time = self._properties.get("expirationTime")
if expiration_time is not None:
# expiration_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(expiration_time)
)
@expires.setter
def expires(self, value):
if not isinstance(value, datetime.datetime) and value is not None:
raise ValueError("Pass a datetime, or None")
value_ms = google.cloud._helpers._millis_from_datetime(value)
self._properties["expirationTime"] = _helpers._str_or_none(value_ms)
@property
def friendly_name(self):
"""Union[str, None]: Title of the table (defaults to :data:`None`).
Raises:
ValueError: For invalid value types.
"""
return self._properties.get("friendlyName")
@friendly_name.setter
def friendly_name(self, value):
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties["friendlyName"] = value
@property
def location(self):
"""Union[str, None]: Location in which the table is hosted
Defaults to :data:`None`.
"""
return self._properties.get("location")
@property
def view_query(self):
"""Union[str, None]: SQL query defining the table as a view (defaults
to :data:`None`).
By default, the query is treated as Standard SQL. To use Legacy
SQL, set :attr:`view_use_legacy_sql` to :data:`True`.
Raises:
ValueError: For invalid value types.
"""
view = self._properties.get("view")
if view is not None:
return view.get("query")
@view_query.setter
def view_query(self, value):
if not isinstance(value, six.string_types):
raise ValueError("Pass a string")
view = self._properties.get("view")
if view is None:
view = self._properties["view"] = {}
view["query"] = value
# The service defaults useLegacySql to True, but this
# client uses Standard SQL by default.
if view.get("useLegacySql") is None:
view["useLegacySql"] = False
@view_query.deleter
def view_query(self):
"""Delete SQL query defining the table as a view."""
self._properties.pop("view", None)
view_use_legacy_sql = property(_view_use_legacy_sql_getter)
@view_use_legacy_sql.setter
def view_use_legacy_sql(self, value):
if not isinstance(value, bool):
raise ValueError("Pass a boolean")
if self._properties.get("view") is None:
self._properties["view"] = {}
self._properties["view"]["useLegacySql"] = value
@property
def streaming_buffer(self):
"""google.cloud.bigquery.StreamingBuffer: Information about a table's
streaming buffer.
"""
sb = self._properties.get("streamingBuffer")
if sb is not None:
return StreamingBuffer(sb)
@property
def external_data_configuration(self):
"""Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for
an external data source (defaults to :data:`None`).
Raises:
ValueError: For invalid value types.
"""
prop = self._properties.get("externalDataConfiguration")
if prop is not None:
prop = ExternalConfig.from_api_repr(prop)
return prop
@external_data_configuration.setter
def external_data_configuration(self, value):
if not (value is None or isinstance(value, ExternalConfig)):
raise ValueError("Pass an ExternalConfig or None")
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._properties["externalDataConfiguration"] = api_repr
@classmethod
def from_string(cls, full_table_id):
"""Construct a table from fully-qualified table ID.
Args:
full_table_id (str):
A fully-qualified table ID in standard SQL format. Must
                include a project ID, dataset ID, and table ID, each
separated by ``.``.
Returns:
Table: Table parsed from ``full_table_id``.
Examples:
>>> Table.from_string('my-project.mydataset.mytable')
Table(TableRef...(D...('my-project', 'mydataset'), 'mytable'))
Raises:
ValueError:
If ``full_table_id`` is not a fully-qualified table ID in
standard SQL format.
"""
return cls(TableReference.from_string(full_table_id))
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a table given its API representation
Args:
resource (Dict[str, object]):
Table resource representation from the API
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset containing the table.
Returns:
google.cloud.bigquery.table.Table: Table parsed from ``resource``.
Raises:
KeyError:
If the ``resource`` lacks the key ``'tableReference'``, or if
the ``dict`` stored within the key ``'tableReference'`` lacks
the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``.
"""
from google.cloud.bigquery import dataset
if (
"tableReference" not in resource
or "tableId" not in resource["tableReference"]
):
raise KeyError(
"Resource lacks required identity information:"
'["tableReference"]["tableId"]'
)
project_id = resource["tableReference"]["projectId"]
table_id = resource["tableReference"]["tableId"]
dataset_id = resource["tableReference"]["datasetId"]
dataset_ref = dataset.DatasetReference(project_id, dataset_id)
table = cls(dataset_ref.table(table_id))
table._properties = resource
return table
def to_api_repr(self):
"""Constructs the API resource of this table
Returns:
Dict[str, object]: Table represented as an API resource
"""
return copy.deepcopy(self._properties)
def _build_resource(self, filter_fields):
"""Generate a resource for ``update``."""
partial = {}
for filter_field in filter_fields:
api_field = self._PROPERTY_TO_API_FIELD.get(filter_field)
if api_field is None and filter_field not in self._properties:
raise ValueError("No Table property %s" % filter_field)
elif api_field is not None:
partial[api_field] = self._properties.get(api_field)
else:
# allows properties that are not defined in the library
# and properties that have the same name as API resource key
partial[filter_field] = self._properties[filter_field]
return partial
def __repr__(self):
return "Table({})".format(repr(self.reference))
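# Illustrative sketch (added for clarity; not part of the original module). The
# identifiers below are hypothetical; the snippet only shows that the property
# setters write through to the underlying API resource returned by
# to_api_repr().
def _example_table_properties():
    table = Table.from_string("my-project.mydataset.mytable")
    table.description = "demo table"
    table.labels = {"env": "test"}
    resource = table.to_api_repr()
    assert resource["description"] == "demo table"
    assert resource["labels"] == {"env": "test"}
    assert table.table_id == "mytable"
    return table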
class TableListItem(object):
"""A read-only table resource from a list operation.
For performance reasons, the BigQuery API only includes some of the table
properties when listing tables. Notably,
:attr:`~google.cloud.bigquery.table.Table.schema` and
:attr:`~google.cloud.bigquery.table.Table.num_rows` are missing.
For a full list of the properties that the BigQuery API returns, see the
`REST documentation for tables.list
<https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list>`_.
Args:
resource (Dict[str, object]):
A table-like resource object from a table list response. A
``tableReference`` property is required.
Raises:
ValueError:
If ``tableReference`` or one of its required members is missing
from ``resource``.
"""
def __init__(self, resource):
if "tableReference" not in resource:
raise ValueError("resource must contain a tableReference value")
if "projectId" not in resource["tableReference"]:
raise ValueError(
"resource['tableReference'] must contain a projectId value"
)
if "datasetId" not in resource["tableReference"]:
raise ValueError(
"resource['tableReference'] must contain a datasetId value"
)
if "tableId" not in resource["tableReference"]:
raise ValueError("resource['tableReference'] must contain a tableId value")
self._properties = resource
@property
def project(self):
"""str: Project bound to the table."""
return self._properties["tableReference"]["projectId"]
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return self._properties["tableReference"]["datasetId"]
@property
def table_id(self):
"""str: ID of the table."""
return self._properties["tableReference"]["tableId"]
reference = property(_reference_getter)
@property
def labels(self):
"""Dict[str, str]: Labels for the table.
        This property always returns a dict. To change a table's labels,
modify the dict, then call ``Client.update_table``. To delete a
label, set its value to :data:`None` before updating.
"""
return self._properties.setdefault("labels", {})
@property
def full_table_id(self):
"""Union[str, None]: ID for the table (:data:`None` until set from the
server).
In the format ``project_id:dataset_id.table_id``.
"""
return self._properties.get("id")
@property
def table_type(self):
"""Union[str, None]: The type of the table (:data:`None` until set from
the server).
Possible values are ``'TABLE'``, ``'VIEW'``, or ``'EXTERNAL'``.
"""
return self._properties.get("type")
@property
def time_partitioning(self):
"""google.cloud.bigquery.table.TimePartitioning: Configures time-based
partitioning for a table.
"""
prop = self._properties.get("timePartitioning")
if prop is not None:
return TimePartitioning.from_api_repr(prop)
@property
def partitioning_type(self):
"""Union[str, None]: Time partitioning of the table if it is
partitioned (Defaults to :data:`None`).
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"TableListItem.time_partitioning.type_ instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.type_
@property
def partition_expiration(self):
"""Union[int, None]: Expiration time in milliseconds for a partition.
If this property is set and :attr:`type_` is not set, :attr:`type_`
will default to :attr:`TimePartitioningType.DAY`.
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"TableListItem.time_partitioning.expiration_ms instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.expiration_ms
@property
def friendly_name(self):
"""Union[str, None]: Title of the table (defaults to :data:`None`)."""
return self._properties.get("friendlyName")
view_use_legacy_sql = property(_view_use_legacy_sql_getter)
def _row_from_mapping(mapping, schema):
"""Convert a mapping to a row tuple using the schema.
Args:
mapping (Dict[str, object])
Mapping of row data: must contain keys for all required fields in
the schema. Keys which do not correspond to a field in the schema
are ignored.
schema (List[google.cloud.bigquery.schema.SchemaField]):
The schema of the table destination for the rows
Returns:
Tuple[object]:
Tuple whose elements are ordered according to the schema.
Raises:
ValueError: If schema is empty.
"""
if len(schema) == 0:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
row = []
for field in schema:
if field.mode == "REQUIRED":
row.append(mapping[field.name])
elif field.mode == "REPEATED":
row.append(mapping.get(field.name, ()))
elif field.mode == "NULLABLE":
row.append(mapping.get(field.name))
else:
raise ValueError("Unknown field mode: {}".format(field.mode))
return tuple(row)
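# Illustrative sketch (added for clarity; not part of the original module). The
# schema below is made up (SchemaField is already imported at the top of this
# module) and only demonstrates how field modes drive the mapping-to-tuple
# conversion.
def _example_row_from_mapping():
    schema = [
        SchemaField("full_name", "STRING", mode="REQUIRED"),
        SchemaField("tags", "STRING", mode="REPEATED"),
        SchemaField("age", "INTEGER", mode="NULLABLE"),
    ]
    # REQUIRED fields must be present; a missing REPEATED field becomes (),
    # and a missing NULLABLE field would become None.
    row = _row_from_mapping({"full_name": "Ada", "age": 36}, schema)
    assert row == ("Ada", (), 36)
    return row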
class StreamingBuffer(object):
"""Information about a table's streaming buffer.
See https://cloud.google.com/bigquery/streaming-data-into-bigquery.
Args:
resource (Dict[str, object]):
streaming buffer representation returned from the API
"""
def __init__(self, resource):
self.estimated_bytes = int(resource["estimatedBytes"])
self.estimated_rows = int(resource["estimatedRows"])
# time is in milliseconds since the epoch.
self.oldest_entry_time = google.cloud._helpers._datetime_from_microseconds(
1000.0 * int(resource["oldestEntryTime"])
)
class Row(object):
"""A BigQuery row.
Values can be accessed by position (index), by key like a dict,
or as properties.
Args:
values (Sequence[object]): The row values
field_to_index (Dict[str, int]):
A mapping from schema field names to indexes
"""
# Choose unusual field names to try to avoid conflict with schema fields.
__slots__ = ("_xxx_values", "_xxx_field_to_index")
def __init__(self, values, field_to_index):
self._xxx_values = values
self._xxx_field_to_index = field_to_index
def values(self):
"""Return the values included in this row.
Returns:
Sequence[object]: A sequence of length ``len(row)``.
"""
return copy.deepcopy(self._xxx_values)
def keys(self):
"""Return the keys for using a row as a dict.
Returns:
Iterable[str]: The keys corresponding to the columns of a row
Examples:
>>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).keys())
['x', 'y']
"""
return six.iterkeys(self._xxx_field_to_index)
def items(self):
"""Return items as ``(key, value)`` pairs.
Returns:
Iterable[Tuple[str, object]]:
The ``(key, value)`` pairs representing this row.
Examples:
>>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items())
[('x', 'a'), ('y', 'b')]
"""
for key, index in six.iteritems(self._xxx_field_to_index):
yield (key, copy.deepcopy(self._xxx_values[index]))
def get(self, key, default=None):
"""Return a value for key, with a default value if it does not exist.
Args:
key (str): The key of the column to access
default (object):
The default value to use if the key does not exist. (Defaults
to :data:`None`.)
Returns:
object:
The value associated with the provided key, or a default value.
Examples:
When the key exists, the value associated with it is returned.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')
'a'
The default value is :data:`None` when the key does not exist.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')
None
            The default value can be overridden with the ``default`` parameter.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')
''
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')
''
"""
index = self._xxx_field_to_index.get(key)
if index is None:
return default
return self._xxx_values[index]
def __getattr__(self, name):
value = self._xxx_field_to_index.get(name)
if value is None:
raise AttributeError("no row field {!r}".format(name))
return self._xxx_values[value]
def __len__(self):
return len(self._xxx_values)
def __getitem__(self, key):
if isinstance(key, six.string_types):
value = self._xxx_field_to_index.get(key)
if value is None:
raise KeyError("no row field {!r}".format(key))
key = value
return self._xxx_values[key]
def __eq__(self, other):
if not isinstance(other, Row):
return NotImplemented
return (
self._xxx_values == other._xxx_values
and self._xxx_field_to_index == other._xxx_field_to_index
)
def __ne__(self, other):
return not self == other
def __repr__(self):
# sort field dict by value, for determinism
items = sorted(self._xxx_field_to_index.items(), key=operator.itemgetter(1))
f2i = "{" + ", ".join("%r: %d" % item for item in items) + "}"
return "Row({}, {})".format(self._xxx_values, f2i)
class RowIterator(HTTPIterator):
"""A class for iterating through HTTP/JSON API row list responses.
Args:
client (google.cloud.bigquery.Client): The API client.
api_request (Callable[google.cloud._http.JSONConnection.api_request]):
The function to use to make API requests.
path (str): The method path to query for the list of items.
page_token (str): A token identifying a page in a result set to start
fetching results from.
max_results (int, optional): The maximum number of results to fetch.
page_size (int, optional): The number of items to return per page.
extra_params (Dict[str, object]):
Extra query string parameters for the API call.
"""
def __init__(
self,
client,
api_request,
path,
schema,
page_token=None,
max_results=None,
page_size=None,
extra_params=None,
):
super(RowIterator, self).__init__(
client,
api_request,
path,
item_to_value=_item_to_row,
items_key="rows",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
page_start=_rows_page_start,
next_token="pageToken",
)
self._schema = schema
self._field_to_index = _helpers._field_to_index_mapping(schema)
self._total_rows = None
self._page_size = page_size
def _get_next_page_response(self):
"""Requests the next page from the path provided.
Returns:
Dict[str, object]:
The parsed JSON response of the next page's contents.
"""
params = self._get_query_params()
if self._page_size is not None:
params["maxResults"] = self._page_size
return self.api_request(
method=self._HTTP_METHOD, path=self.path, query_params=params
)
@property
def schema(self):
"""List[google.cloud.bigquery.schema.SchemaField]: Table's schema."""
return list(self._schema)
@property
def total_rows(self):
"""int: The total number of rows in the table."""
return self._total_rows
def to_dataframe(self):
"""Create a pandas DataFrame from the query results.
Returns:
pandas.DataFrame:
A :class:`~pandas.DataFrame` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError: If the :mod:`pandas` library cannot be imported.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
column_headers = [field.name for field in self.schema]
# Use generator, rather than pulling the whole rowset into memory.
rows = (row.values() for row in iter(self))
return pandas.DataFrame(rows, columns=column_headers)
class _EmptyRowIterator(object):
"""An empty row iterator.
This class prevents API requests when there are no rows to fetch or rows
are impossible to fetch, such as with query results for DDL CREATE VIEW
statements.
"""
schema = ()
pages = ()
total_rows = 0
def to_dataframe(self):
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
return pandas.DataFrame()
def __iter__(self):
return iter(())
class TimePartitioningType(object):
"""Specifies the type of time partitioning to perform."""
DAY = "DAY"
"""str: Generates one partition per day."""
class TimePartitioning(object):
"""Configures time-based partitioning for a table.
Args:
type_ (google.cloud.bigquery.table.TimePartitioningType, optional):
Specifies the type of time partitioning to perform. Defaults to
:attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`,
which is the only currently supported type.
field (str, optional):
If set, the table is partitioned by this field. If not set, the
table is partitioned by pseudo column ``_PARTITIONTIME``. The field
must be a top-level ``TIMESTAMP`` or ``DATE`` field. Its mode must
be ``NULLABLE`` or ``REQUIRED``.
expiration_ms(int, optional):
Number of milliseconds for which to keep the storage for a
partition.
require_partition_filter (bool, optional):
If set to true, queries over the partitioned table require a
partition filter that can be used for partition elimination to be
specified.
"""
def __init__(
self, type_=None, field=None, expiration_ms=None, require_partition_filter=None
):
self._properties = {}
if type_ is None:
self.type_ = TimePartitioningType.DAY
else:
self.type_ = type_
if field is not None:
self.field = field
if expiration_ms is not None:
self.expiration_ms = expiration_ms
if require_partition_filter is not None:
self.require_partition_filter = require_partition_filter
@property
def type_(self):
"""google.cloud.bigquery.table.TimePartitioningType: The type of time
partitioning to use.
"""
return self._properties["type"]
@type_.setter
def type_(self, value):
self._properties["type"] = value
@property
def field(self):
"""str: Field in the table to use for partitioning"""
return self._properties.get("field")
@field.setter
def field(self, value):
self._properties["field"] = value
@property
def expiration_ms(self):
"""int: Number of milliseconds to keep the storage for a partition."""
return _helpers._int_or_none(self._properties.get("expirationMs"))
@expiration_ms.setter
def expiration_ms(self, value):
if value is not None:
# Allow explicitly setting the expiration to None.
value = str(value)
self._properties["expirationMs"] = value
@property
def require_partition_filter(self):
"""bool: Specifies whether partition filters are required for queries
"""
return self._properties.get("requirePartitionFilter")
@require_partition_filter.setter
def require_partition_filter(self, value):
self._properties["requirePartitionFilter"] = value
@classmethod
def from_api_repr(cls, api_repr):
"""Return a :class:`TimePartitioning` object deserialized from a dict.
This method creates a new ``TimePartitioning`` instance that points to
the ``api_repr`` parameter as its internal properties dict. This means
that when a ``TimePartitioning`` instance is stored as a property of
another object, any changes made at the higher level will also appear
here::
>>> time_partitioning = TimePartitioning()
>>> table.time_partitioning = time_partitioning
>>> table.time_partitioning.field = 'timecolumn'
>>> time_partitioning.field
'timecolumn'
Args:
api_repr (Mapping[str, str]):
The serialized representation of the TimePartitioning, such as
what is output by :meth:`to_api_repr`.
Returns:
google.cloud.bigquery.table.TimePartitioning:
The ``TimePartitioning`` object.
"""
instance = cls(api_repr["type"])
instance._properties = api_repr
return instance
def to_api_repr(self):
"""Return a dictionary representing this object.
This method returns the properties dict of the ``TimePartitioning``
instance rather than making a copy. This means that when a
``TimePartitioning`` instance is stored as a property of another
object, any changes made at the higher level will also appear here.
Returns:
dict:
A dictionary representing the TimePartitioning object in
serialized form.
"""
return self._properties
def _key(self):
return tuple(sorted(self._properties.items()))
def __eq__(self, other):
if not isinstance(other, TimePartitioning):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._key())
def __repr__(self):
key_vals = ["{}={}".format(key, val) for key, val in self._key()]
return "TimePartitioning({})".format(",".join(key_vals))
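# Illustrative sketch (added for clarity; not part of the original module). The
# field name and expiration below are arbitrary; the snippet shows the
# serialized form produced by to_api_repr() (the expiration is stored as a
# string of milliseconds).
def _example_time_partitioning():
    partitioning = TimePartitioning(field="event_ts", expiration_ms=86400000)
    assert partitioning.type_ == TimePartitioningType.DAY  # default type
    assert partitioning.to_api_repr() == {
        "type": "DAY",
        "field": "event_ts",
        "expirationMs": "86400000",
    }
    return partitioning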
def _item_to_row(iterator, resource):
"""Convert a JSON row to the native object.
.. note::
This assumes that the ``schema`` attribute has been
added to the iterator after being created, which
should be done by the caller.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a row.
:rtype: :class:`~google.cloud.bigquery.table.Row`
:returns: The next row in the page.
"""
return Row(
_helpers._row_tuple_from_json(resource, iterator.schema),
iterator._field_to_index,
)
# pylint: disable=unused-argument
def _rows_page_start(iterator, page, response):
"""Grab total rows when :class:`~google.cloud.iterator.Page` starts.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type page: :class:`~google.api_core.page_iterator.Page`
:param page: The page that was just created.
:type response: dict
:param response: The JSON API response for a page of rows in a table.
"""
total_rows = response.get("totalRows")
if total_rows is not None:
total_rows = int(total_rows)
iterator._total_rows = total_rows
# pylint: enable=unused-argument
| apache-2.0 |
ajmendez/batlog2 | bin/plot_batlog.py | 1 | 3345 | #!/usr/bin/env python
import json
import pylab
import numpy as np
from datetime import datetime
from pysurvey.plot import setup, dateticks, hist, minmax, embiggen
from parse_batlog import OUTFILENAME as FILENAME
from matplotlib.dates import date2num, num2date
def make():
tag = 'Voltage'
tag = 'CurrentCapacity'
data = json.load(open(FILENAME,'r'))
date = date2num([datetime.fromtimestamp(x['date']) for x in data if len(x) > 1])
amp = [x[tag] for x in data if len(x) > 1]
damp = np.diff(amp)
ddate = date[:-1] + np.diff(date)/2.0
uii = np.where(damp > 2)
dii = np.where(damp < -2)
print len(data)
plot_params = dict(
marker = 's',
markersize=2,
alpha=0.2,
linestyle='_',
markeredgewidth=0,
markeredgecolor='none',
)
xr = embiggen(minmax(date), 0.02, 'both')
yr = [0,7000]
dyr = [-80,80]
if np.max(amp) > 7000:
yr = [0,10000]
dyr = [-100,100]
setup(figsize=(16,8))
setup(subplt=(2,3,1), autoticks=True,
title='Last Recorded: {}'.format(num2date(date[-1])),
xlabel='Date', xr=xr,
ylabel=tag, yr=yr)
pylab.plot(date, amp, **plot_params)
dateticks('%Y-%m-%d')
tmp = date % 7.0
setup(subplt=(2,3,2), autoticks=True,
title='Current Date: {}'.format(datetime.now()),
xlabel='Day of Week',
xtickv=np.arange(7), xr=[-0.2,6.2],
xticknames='sun mon tue wed thur fri sat'.split(),
ylabel=tag, yr=yr)
pylab.plot(tmp, amp, **plot_params)
tmp = (date % 1.0) * 24.0
setup(subplt=(2,3,3), autoticks=True,
xlabel='Hour of Day', xr=[-0.4,24.4],
xtickv=np.arange(0,25,4), xtickrotate=dict(rotation=90, ha='center'),
xticknames='mid 4am 8am noon 4pm 8pm mid'.split(),
ylabel=tag, yr=yr)
pylab.plot(tmp, amp, **plot_params)
# dateticks('%Y-%m-%d')
setup(subplt=(2,3,4), autoticks=True,
xlabel='Date', xr=xr,
ylabel='Delta', yr=dyr)
pylab.plot(ddate, damp, **plot_params)
dateticks('%Y-%m-%d', ha='center')
tmp = (date % 7.0)[:-1]
setup(subplt=(2,3,5), autoticks=True,
xlabel='Day of Week',
xtickv=np.arange(7), xr=[-0.2,6.2],
xticknames='sun mon tue wed thur fri sat'.split(),
ylabel=tag, yr=dyr)
pylab.plot(tmp, damp, **plot_params)
hist(tmp[uii], np.linspace(0,7,90), alpha=0.5, norm=dyr[1], filled=True)
hist(tmp[dii], np.linspace(0,7,90), alpha=0.5, norm=dyr[0], filled=True)
tmp = ((date % 1.0) * 24.0)[:-1]
setup(subplt=(2,3,6), autoticks=True,
xlabel='Hour of Day', xr=[-0.4,24.4],
xtickv=np.arange(0,25,4), xtickrotate=dict(rotation=90, ha='center'),
xticknames='mid 4am 8am noon 4pm 8pm mid'.split(),
ylabel=tag, yr=dyr)
pylab.plot(tmp, damp, **plot_params)
hist(tmp[uii], np.linspace(0,24,50), alpha=0.5, norm=dyr[1], filled=True)
hist(tmp[dii], np.linspace(0,24,50), alpha=0.5, norm=dyr[0], filled=True)
# dateticks('%Y-%m-%d')
# pylab.tight_layout()
setup(hspace=0, wspace=0)
pylab.show()
if __name__ == '__main__':
from pysurvey import util
util.setup_stop()
make()
| bsd-2-clause |
joshloyal/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 13 | 26703 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.datasets import make_multilabel_classification
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_regression_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
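# Sketch of the reasoning behind the rescaling check above (not part of the
# original test): for the squared loss,
#     sum_i w_i * (y_i - x_i . coef)**2 + alpha * ||coef||**2
# equals
#     ||sqrt(w) * (y - X . coef)||**2 + alpha * ||coef||**2,
# so fitting ridge on (sqrt(w)[:, None] * X, sqrt(w) * y) without weights must
# give the same coefficients as passing sample_weight=w directly.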
def test_ridge_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
param_grid = product((1.0, 1e-2), (True, False),
('svd', 'cholesky', 'lsqr', 'sparse_cg'))
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for (alpha, intercept, solver) in param_grid:
# Ridge with explicit sample_weight
est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
coefs = est.coef_
inter = est.intercept_
# Closed form of the weighted regularized least square
# theta = (X^T W X + alpha I)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
I = np.eye(n_features)
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
I = np.eye(n_features + 1)
I[0, 0] = 0
cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs, cf_coefs)
else:
assert_array_almost_equal(coefs, cf_coefs[1:])
assert_almost_equal(inter, cf_coefs[0])
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
reg = Ridge(alpha=0.0)
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(reg.predict(X_test), [1., 2, 3, 4])
assert_equal(len(reg.coef_.shape), 1)
assert_equal(type(reg.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(reg.coef_.shape), 2)
assert_equal(type(reg.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
fit_intercept = filter_ == DENSE_FILTER
if fit_intercept:
X_diabetes_ = X_diabetes - X_diabetes.mean(0)
else:
X_diabetes_ = X_diabetes
ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept)
ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept)
# because fit_intercept is applied
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes, fit_intercept)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes_[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes_[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes_, y_diabetes, fit_intercept)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('neg_mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for reg in (RidgeClassifier(), RidgeClassifierCV()):
reg.fit(filter_(X_iris), y_iris)
assert_equal(reg.coef_.shape, (n_classes, n_features))
y_pred = reg.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
cv = KFold(5)
reg = RidgeClassifierCV(cv=cv)
reg.fit(filter_(X_iris), y_iris)
y_pred = reg.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def check_dense_sparse(test_func):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
yield check_dense_sparse, test_func
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
reg = RidgeClassifier(class_weight={1: 0.001})
reg.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
reg = RidgeClassifier(class_weight='balanced')
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
rega = RidgeClassifier(class_weight='balanced')
rega.fit(X, y)
assert_equal(len(rega.classes_), 2)
assert_array_almost_equal(reg.coef_, rega.coef_)
assert_array_almost_equal(reg.intercept_, rega.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for reg in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
reg1 = reg()
reg1.fit(iris.data, iris.target)
reg2 = reg(class_weight='balanced')
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Check that sample_weight and class_weight are multiplicative
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight ** 2)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(reg1.coef_, reg2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
reg.fit(X, y)
# we give a small weights to class 1
reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
reg.fit(X, y)
assert_array_equal(reg.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
cv = KFold(5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
gs = GridSearchCV(Ridge(), parameters, cv=cv)
gs.fit(X, y, sample_weight=sample_weight)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
def test_errors_and_values_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
n = 5
y = rng.randn(n)
v = rng.randn(n)
Q = rng.randn(len(v), len(v))
QT_y = Q.T.dot(y)
G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
    np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_errors_and_values_svd_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
for n, p in zip((5, 10), (12, 6)):
y = rng.randn(n)
v = rng.randn(p)
U = rng.randn(n, p)
UT_y = U.T.dot(y)
G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
        np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_ridge_classifier_no_support_multilabel():
X, y = make_multilabel_classification(n_samples=10, random_state=0)
assert_raises(ValueError, RidgeClassifier().fit, X, y)
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
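# Note (sketch of why the linked list is used): Birch keeps its leaves in a
# singly linked list starting at ``dummy_leaf_.next_leaf_``, so check_threshold
# can visit every leaf subcluster once without recursing through ``root_``.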
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
jdherman/SALib | docs/conf.py | 2 | 9326 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# Support markdown
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/SALib")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from pkg_resources import parse_version
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SALib'
copyright = u'2019, Jon Herman, Will Usher and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'sidebar_width': '300px',
'page_width': '1200px'
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from SALib import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'salib-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'SALib Documentation',
u'Jon Herman, Will Usher and others', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| mit |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/sparse/test_groupby.py | 18 | 1736 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseGroupBy(object):
def setup_method(self, method):
self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': [np.nan, np.nan, 1, 2,
np.nan, 1, np.nan, np.nan]})
self.sparse = self.dense.to_sparse()
def test_first_last_nth(self):
# tests for first / last / nth
sparse_grouped = self.sparse.groupby('A')
dense_grouped = self.dense.groupby('A')
tm.assert_frame_equal(sparse_grouped.first(),
dense_grouped.first())
tm.assert_frame_equal(sparse_grouped.last(),
dense_grouped.last())
tm.assert_frame_equal(sparse_grouped.nth(1),
dense_grouped.nth(1))
def test_aggfuncs(self):
sparse_grouped = self.sparse.groupby('A')
dense_grouped = self.dense.groupby('A')
tm.assert_frame_equal(sparse_grouped.mean(),
dense_grouped.mean())
# ToDo: sparse sum includes str column
# tm.assert_frame_equal(sparse_grouped.sum(),
# dense_grouped.sum())
tm.assert_frame_equal(sparse_grouped.count(),
dense_grouped.count())
| mit |
wjlei1990/pycmt3d | src/pycmt3d/plot_util.py | 1 | 22974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plot utils
:copyright:
Wenjie Lei ([email protected]), 2016
:license:
GNU Lesser General Public License, version 3 (LGPLv3)
(http://www.gnu.org/licenses/lgpl-3.0.en.html)
"""
from __future__ import print_function, division, absolute_import
import os
from collections import defaultdict
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.patches import Rectangle
from obspy.geodetics import gps2dist_azimuth
from obspy.imaging.beachball import beach
from . import logger
from .util import get_cmt_par, get_trwin_tag
from .measure import _envelope
# earth half circle
EARTH_HC, _, _ = gps2dist_azimuth(0, 0, 0, 180)
def _plot_new_seismogram_sub(trwin, outputdir, cmtsource, figure_format):
obsd = trwin.datalist['obsd']
synt = trwin.datalist['synt']
new_synt = trwin.datalist['new_synt']
station = obsd.stats.station
network = obsd.stats.network
channel = obsd.stats.channel
location = obsd.stats.location
outputfig = os.path.join(outputdir, "%s.%s.%s.%s.%s" % (
network, station, location, channel, figure_format))
if cmtsource is None:
offset = 0
else:
offset = obsd.stats.starttime - cmtsource.cmt_time
times = [offset + obsd.stats.delta*i for i in range(obsd.stats.npts)]
fig = plt.figure(figsize=(15, 5))
# plot seismogram
plt.subplot(211)
plt.plot(times, obsd.data, color="black", linewidth=0.5, alpha=0.6,
label="obsd")
plt.plot(times, synt.data, color="red", linewidth=0.8,
label="synt")
plt.plot(times, new_synt.data, color="green", linewidth=0.8,
label="new synt")
plt.xlim(times[0], times[-1])
xlim1 = plt.xlim()[1]
ylim1 = plt.ylim()[1]
fontsize = 9
plt.text(0.01*xlim1, 0.80*ylim1, "Network: %2s Station: %s" %
(network, station), fontsize=fontsize)
plt.text(0.01*xlim1, 0.65*ylim1, "Location: %2s Channel:%3s" %
(location, channel), fontsize=fontsize)
for win in trwin.windows:
left = win[0] + offset
right = win[1] + offset
re = Rectangle((left, plt.ylim()[0]), right - left,
plt.ylim()[1] - plt.ylim()[0], color="blue",
alpha=0.25)
plt.gca().add_patch(re)
# plot envelope
plt.subplot(212)
plt.plot(times, _envelope(obsd.data), color="black", linewidth=0.5,
alpha=0.6, label="obsd")
plt.plot(times, _envelope(synt.data), color="red", linewidth=0.8,
label="synt")
plt.plot(times, _envelope(new_synt.data), color="green", linewidth=0.8,
label="new synt")
plt.xlim(times[0], times[-1])
for win in trwin.windows:
left = win[0] + offset
right = win[1] + offset
re = Rectangle((left, plt.ylim()[0]), right - left,
plt.ylim()[1] - plt.ylim()[0], color="blue",
alpha=0.25)
plt.gca().add_patch(re)
logger.info("output figname: %s" % outputfig)
plt.legend(prop={'size': 6})
plt.savefig(outputfig)
plt.close(fig)
def plot_seismograms(data_container, outputdir, cmtsource=None,
figure_format="png"):
"""
Plot the new synthetic and old synthetic data together with data.
    So we can see how the seismogram changes after the inversion.
"""
# make a check
if 'new_synt' not in data_container.trwins[0].datalist.keys():
return "New synt not generated...Can't plot"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
logger.info("Plotting observed, synthetics and windows to dir: %s"
% outputdir)
for trwin in data_container:
_plot_new_seismogram_sub(trwin, outputdir, cmtsource,
figure_format)
class PlotStats(object):
""" plot histogram utils"""
def __init__(self, data_container, metas, outputfn):
self.data_container = data_container
self.metas = metas
self.outputfn = outputfn
self.metas_sort = None
self.key_map = None
def sort_metas(self):
        """Sort metas into different categories for future plotting."""
        metas_sort = defaultdict(list)
        key_map = defaultdict(set)
for trwin, meta in zip(self.data_container, self.metas):
comp = trwin.channel
cat_name = get_trwin_tag(trwin)
key_map[comp].add(cat_name)
metas_sort[cat_name].append(meta)
self.metas_sort = metas_sort
self.key_map = key_map
@staticmethod
def plot_stats_histogram_one_entry(pos, cat, vtype, data_b, data_a,
num_bin):
plt.subplot(pos)
plt.xlabel(vtype, fontsize=15)
plt.ylabel(cat, fontsize=15)
if vtype == "cc":
ax_min = min(min(data_b), min(data_a))
ax_max = max(max(data_b), max(data_a))
elif vtype == "chi":
ax_min = 0.0
ax_max = max(max(data_b), max(data_a))
else:
ax_min = min(min(data_b), min(data_a))
ax_max = max(max(data_b), max(data_a))
abs_max = max(abs(ax_min), abs(ax_max))
ax_min = -abs_max
ax_max = abs_max
binwidth = (ax_max - ax_min) / num_bin
plt.hist(
data_b, bins=np.arange(ax_min, ax_max+binwidth/2., binwidth),
facecolor='blue', alpha=0.3)
plt.hist(
data_a, bins=np.arange(ax_min, ax_max+binwidth/2., binwidth),
facecolor='green', alpha=0.5)
def extract_metadata(self, cat_name, meta_varname):
data_old = []
data_new = []
cat_data = self.metas_sort[cat_name]
for meta in cat_data:
data_old.extend(meta.prov["synt"][meta_varname])
data_new.extend(meta.prov["new_synt"][meta_varname])
return data_old, data_new
def plot_stats_histogram_one_category(
self, G, irow, cat_name, vtype_list, num_bins, vtype_dict):
for var_idx, varname in enumerate(vtype_list):
meta_varname = vtype_dict[varname]
data_before, data_after = \
self.extract_metadata(cat_name, meta_varname)
self.plot_stats_histogram_one_entry(
G[irow, var_idx], cat_name, varname, data_before, data_after,
num_bins[var_idx])
def plot_stats_histogram(self):
"""
Plot histogram of tshift, cc, power, cc_amplitude_ratio,
waveform misfit values before and after inversion inside
windows.
:return:
"""
vtype_list = ['time shift', 'cc',
'power_l1_ratio(dB)', 'power_l2_ratio(dB)',
'CC amplitude ratio(dB)', 'chi']
num_bins = [15, 15, 15, 15, 15, 15]
vtype_dict = {'time shift': "tshift", 'cc': "cc",
"power_l1_ratio(dB)": "power_l1",
"power_l2_ratio(dB)": "power_l2",
"CC amplitude ratio(dB)": "cc_amp",
"chi": "chi"}
self.sort_metas()
nrows = len(self.metas_sort.keys())
ncols = len(vtype_list)
plt.figure(figsize=(4*ncols, 4*nrows))
G = gridspec.GridSpec(nrows, ncols)
cat_names = sorted(self.metas_sort.keys())
for irow, cat in enumerate(cat_names):
self.plot_stats_histogram_one_category(
G, irow, cat, vtype_list, num_bins, vtype_dict)
plt.tight_layout()
plt.savefig(self.outputfn)
class PlotInvSummary(object):
def __init__(self, data_container=None, cmtsource=None, config=None,
nregions=12, new_cmtsource=None, bootstrap_mean=None,
bootstrap_std=None, var_reduction=0.0, mode="regional"):
self.data_container = data_container
self.cmtsource = cmtsource
self.trwins = data_container.trwins
self.config = config
self.nregions = nregions
self.new_cmtsource = new_cmtsource
self.bootstrap_mean = bootstrap_mean
self.bootstrap_std = bootstrap_std
self.var_reduction = var_reduction
if mode.lower() not in ["global", "regional"]:
raise ValueError("Plot mode: 1) global; 2) regional")
self.mode = mode.lower()
self.sta_lat = None
self.sta_lon = None
self.sta_dist = []
# azimuth in degree unit
self.sta_azi = []
# azimuth in radius unit
self.sta_theta = []
self.prepare_array()
def prepare_array(self):
# station
self.sta_lat = [window.latitude for window in self.trwins]
self.sta_lon = [window.longitude for window in self.trwins]
for sta_lat, sta_lon in zip(self.sta_lat, self.sta_lon):
dist, az, baz = gps2dist_azimuth(self.cmtsource.latitude,
self.cmtsource.longitude,
sta_lat, sta_lon)
self.sta_azi.append(az)
self.sta_theta.append(az / 180.0 * np.pi)
if self.mode == "regional":
# if regional, then use original distance(in km)
self.sta_dist.append(dist / 1000.0)
elif self.mode == "global":
# if global, then use degree as unit
self.sta_dist.append(dist/EARTH_HC)
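    # Note on the distance units chosen above: in "regional" mode sta_dist is
    # kept in kilometres, while in "global" mode it is stored as a fraction of
    # half the Earth's circumference (EARTH_HC), which lets the polar
    # distance/azimuth plot use a fixed radial limit of 1.0.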
def get_azimuth_bin_number(self, azimuth):
"""
Calculate the bin number of a given azimuth
        :param azimuth: azimuth in degrees, expected in the range [0, 360]
        :return: zero-based bin index in [0, nregions)
"""
# the azimth ranges from [0,360]
# so a little modification here
daz = 360.0 / self.nregions
k = int(math.floor(azimuth / daz))
        if k < 0 or k >= self.nregions:
if azimuth - 360.0 < 0.0001:
k = self.nregions - 1
else:
raise ValueError('Error bining azimuth')
return k
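    # Example (sketch): with the default nregions=12 each bin spans 30 degrees,
    # so an azimuth of 95.0 falls in bin floor(95 / 30) = 3, while an azimuth
    # of exactly 360.0 is folded back into the last bin (nregions - 1 = 11).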
def calculate_azimuth_bin(self, azimuth_array):
"""
        Sort the (azimuth, count) pairs into ``nregions`` azimuthal bins
        :param azimuth_array: list of [azimuth_in_degrees, count] pairs
        :return: bin edges in radians and the per-bin window counts
"""
delta = 2*np.pi/self.nregions
bins = [delta*i for i in range(self.nregions)]
naz_wins = np.zeros(self.nregions)
for azimuth in azimuth_array:
bin_idx = self.get_azimuth_bin_number(azimuth[0])
naz_wins[bin_idx] += azimuth[1]
return bins, naz_wins
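    # Example (sketch, hypothetical counts): with nregions=12,
    # calculate_azimuth_bin([[10.0, 2], [15.0, 1], [200.0, 4]]) returns the 12
    # bin edges 0, pi/6, ..., 11*pi/6 (radians) and counts with 3 windows in
    # bin 0 (azimuth < 30 deg) and 4 windows in bin 6 (180-210 deg).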
@staticmethod
def plot_si_bb(ax, cmt):
# get moment tensor
mt = [cmt.m_rr, cmt.m_tt, cmt.m_pp, cmt.m_rt, cmt.m_rp, cmt.m_tp]
# plot beach ball
b = beach(mt, linewidth=1, xy=(0, 0.6), width=1, size=2,
facecolor='r')
ax.add_collection(b)
# set axis
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1.5])
ax.set_aspect('equal')
# magnitude
text = "Mw=%4.3f" % cmt.moment_magnitude
plt.text(-0.9, -0.3, text, fontsize=7)
# lat and lon
text = r"lat=%6.3f$^\circ$; lon=%6.3f$^\circ$" \
% (cmt.latitude, cmt.longitude)
plt.text(-0.9, -0.5, text, fontsize=7)
# depth
text = "dep=%6.3f km;" % (cmt.depth_in_m/1000.0)
plt.text(-0.9, -0.7, text, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# title
text = "Init CMT"
plt.text(-0.9, 1.3, text, fontsize=7)
@staticmethod
def plot_si_bb_comp(ax, cmt, cmt_init, tag):
# get moment tensor
mt = [cmt.m_rr, cmt.m_tt, cmt.m_pp, cmt.m_rt, cmt.m_rp, cmt.m_tp]
# plot beach ball
b = beach(mt, linewidth=1, xy=(0, 0.6), width=1, size=2,
facecolor='r')
ax.add_collection(b)
# set axis
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1.5])
ax.set_aspect('equal')
# magnitude
text = r"$\Delta$Mw=%4.3f" % (
cmt.moment_magnitude-cmt_init.moment_magnitude)
plt.text(-0.9, -0.3, text, fontsize=7)
# lat and lon
text = r"$\Delta$lat=%6.3f$^\circ$; $\Delta$lon=%6.3f$^\circ$" \
% (cmt.latitude-cmt_init.latitude,
cmt.longitude-cmt_init.longitude)
plt.text(-0.9, -0.5, text, fontsize=7)
# depth
text = r"$\Delta$dep=%6.3f km;" % (
(cmt.depth_in_m-cmt_init.depth_in_m)/1000.0)
plt.text(-0.9, -0.7, text, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
text = tag
plt.text(-0.9, 1.3, text, fontsize=7)
def plot_table(self):
par_mean = self.bootstrap_mean
par_std = self.bootstrap_std
std_over_mean = np.zeros(par_mean.shape)
for _i in range(par_mean.shape[0]):
if par_mean[_i] != 0:
std_over_mean[_i] = par_std[_i]/np.abs(par_mean[_i])
else:
std_over_mean[_i] = 0.0
fontsize = 9
incre = 0.06
pos = 1.00
format1 = "%15.4e %15.4e %15.4e %15.4e %10.2f%%"
format2 = "%16.3f %16.3f %20.3f %20.3f %15.2f%%"
format3 = "%15.3f %15.3f %18.3f %18.3f %18.2f%%"
text = "Number of stations: %d Number of widnows: %d" \
% (len(self.sta_lat), self.data_container.nwindows) + \
"Envelope coef: %5.2f"
plt.text(0, pos, text, fontsize=fontsize)
pos -= incre
text = "Number of Parameter:%3d Zero-Trace:%6s" \
" Double-couple:%6s " \
% (self.config.npar, self.config.zero_trace,
self.config.double_couple)
plt.text(0, pos, text, fontsize=fontsize)
pos -= incre
text = "Station Correction:%6s Norm_by_energy:%6s" \
" Norm_by_category:%6s" \
% (self.config.station_correction,
self.config.weight_config.normalize_by_energy,
self.config.weight_config.normalize_by_category)
plt.text(0, pos, text, fontsize=fontsize)
pos -= incre
energy_change = \
(self.new_cmtsource.M0 - self.cmtsource.M0) / self.cmtsource.M0
text = "Inversion Damping:%6.3f Energy Change: %6.2f%%" \
" Variance Reduction: %6.2f%%" \
% (self.config.damping, energy_change*100,
self.var_reduction*100)
plt.text(0, pos, text, fontsize=fontsize)
pos -= incre
text = "*"*20 + " Summary Table " + "="*20
plt.text(0, pos, text, fontsize=fontsize)
pos -= incre
text = " PAR Old_CMT New_CMT Bootstrap_Mean" \
" Bootstrap_STD STD/Mean"
plt.text(0, pos, text, fontsize=fontsize)
pos -= incre
text = "Mrr:" + format1 % (
self.cmtsource.m_rr, self.new_cmtsource.m_rr,
par_mean[0], par_std[0], std_over_mean[0] * 100)
plt.text(0, pos, text, fontsize=fontsize)
text = "Mtt:" + format1 % (
self.cmtsource.m_tt, self.new_cmtsource.m_tt,
par_mean[1], par_std[1], std_over_mean[1] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "Mpp:" + format1 % (
self.cmtsource.m_pp, self.new_cmtsource.m_pp,
par_mean[2], par_std[2], std_over_mean[2] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "Mrt:" + format1 % (
self.cmtsource.m_rt, self.new_cmtsource.m_rt,
par_mean[3], par_std[3], std_over_mean[3] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "Mrp:" + format1 % (
self.cmtsource.m_rp, self.new_cmtsource.m_rp,
par_mean[4], par_std[4], std_over_mean[4] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "Mtp:" + format1 % (
self.cmtsource.m_tp, self.new_cmtsource.m_tp,
par_mean[5], par_std[5], std_over_mean[5] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "DEP: " + format3 % (
self.cmtsource.depth_in_m,
self.new_cmtsource.depth_in_m,
par_mean[6], par_std[6], std_over_mean[6] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "LON:" + format2 % (
self.cmtsource.longitude, self.new_cmtsource.longitude,
par_mean[7], par_std[7], std_over_mean[7] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "LAT: " + format2 % (
self.cmtsource.latitude, self.new_cmtsource.latitude,
par_mean[8], par_std[8], std_over_mean[8] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "CMT: " + format2 % (
self.cmtsource.time_shift, self.new_cmtsource.time_shift,
par_mean[9], par_std[9], std_over_mean[9] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
text = "HDR: " + format2 % (
self.cmtsource.half_duration, self.new_cmtsource.half_duration,
par_mean[10], par_std[10], std_over_mean[10] * 100)
pos -= incre
plt.text(0, pos, text, fontsize=fontsize)
plt.axis('off')
def plot_global_map(self):
"""
Plot global map of event and stations
"""
from mpl_toolkits.basemap import Basemap
# ax = plt.subplot(211)
plt.title(self.cmtsource.eventname)
m = Basemap(projection='cyl', lon_0=0.0, lat_0=0.0,
resolution='c')
m.drawcoastlines()
m.fillcontinents()
m.drawparallels(np.arange(-90., 120., 30.))
m.drawmeridians(np.arange(0., 420., 60.))
m.drawmapboundary()
x, y = m(self.sta_lon, self.sta_lat)
m.scatter(x, y, 30, color="r", marker="^", edgecolor="k",
linewidth='0.3', zorder=3)
cmt_lat = self.cmtsource.latitude
cmt_lon = self.cmtsource.longitude
focmecs = get_cmt_par(self.cmtsource)[:6]
ax = plt.gca()
if self.mode == 'regional':
minlon = min(self.sta_lon)
maxlon = max(self.sta_lon)
minlat = min(self.sta_lat)
maxlat = max(self.sta_lat)
padding = 5.
m.drawparallels(np.arange(-90., 120., padding))
m.drawmeridians(np.arange(0., 420., padding))
ax.set_xlim(minlon-padding, maxlon+padding)
ax.set_ylim(minlat-padding, maxlat+padding)
width_beach = min((maxlon+2*padding-minlon)/(4*padding),
(maxlat+2*padding-minlat)/(4*padding))
else:
width_beach = 20
bb = beach(focmecs, xy=(cmt_lon, cmt_lat),
width=width_beach, linewidth=1, alpha=1.0)
bb.set_zorder(10)
ax.add_collection(bb)
def plot_sta_dist_azi(self):
plt.title("Station Dist and Azi", fontsize=10)
ax = plt.gca()
c = plt.scatter(self.sta_theta, self.sta_dist, marker=u'^', c='r',
s=20, edgecolor='k', linewidth='0.3')
c.set_alpha(0.75)
plt.xticks(fontsize=8)
plt.yticks(fontsize=6)
if self.mode == "regional":
ax.set_rmax(1.10 * max(self.sta_dist))
elif self.mode == "global":
ax.set_rmax(1.0)
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
def plot_sta_azi(self):
# set plt.subplot(***, polar=True)
plt.title("Station Azimuth", fontsize=10)
azimuth_array = []
for azi in self.sta_azi:
azimuth_array.append([azi, 1])
bins, naz = self.calculate_azimuth_bin(azimuth_array)
norm_factor = np.max(naz)
bars = plt.bar(bins, naz, width=(bins[1]-bins[0]), bottom=0.0)
for r, bar in zip(naz, bars):
bar.set_facecolor(plt.cm.jet(r/norm_factor))
bar.set_alpha(0.5)
bar.set_linewidth(0.3)
# ax.set_xticklabels([])
# ax.set_yticklabels([])
plt.xticks(fontsize=8)
plt.yticks(fontsize=6)
ax = plt.gca()
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
def plot_win_azi(self):
# set plt.subplot(***, polar=True)
plt.title("Window Azimuth", fontsize=10)
win_azi = []
for azi, window in zip(self.sta_azi, self.trwins):
win_azi.append([azi, window.nwindows])
bins, naz = self.calculate_azimuth_bin(win_azi)
norm_factor = np.max(naz)
bars = plt.bar(bins, naz, width=(bins[1]-bins[0]), bottom=0.0)
for r, bar in zip(naz, bars):
bar.set_facecolor(plt.cm.jet(r/norm_factor))
bar.set_alpha(0.5)
bar.set_linewidth(0.3)
# ax.set_xticklabels([])
# ax.set_yticklabels([])
plt.xticks(fontsize=8)
plt.yticks(fontsize=6)
ax = plt.gca()
ax.set_theta_zero_location('N')
ax.set_theta_direction(-1)
def plot_dataset(self, figurename=None):
"""
Plot only the dataset, including global map, station and window
distribution, and beach ball
"""
plt.figure(figsize=(10, 7), facecolor='w', edgecolor='k')
g = gridspec.GridSpec(2, 3)
plt.subplot(g[0, :-1])
self.plot_global_map()
plt.subplot(g[1, 0], polar=True)
self.plot_sta_dist_azi()
plt.subplot(g[1, 1], polar=True)
self.plot_sta_azi()
plt.subplot(g[1, 2], polar=True)
self.plot_win_azi()
ax = plt.subplot(g[0, 2])
self.plot_si_bb(ax, self.cmtsource)
if figurename is None:
plt.show()
else:
plt.savefig(figurename)
def plot_inversion_summary(self, figurename=None):
"""
Plot the dataset and the inversion result.
"""
if self.new_cmtsource is None:
raise ValueError("No new cmtsource...Can't plot summary")
plt.figure(figsize=(10, 11), facecolor='w', edgecolor='k')
g = gridspec.GridSpec(3, 3)
plt.subplot(g[0, :-1])
self.plot_global_map()
plt.subplot(g[1, 0], polar=True)
self.plot_sta_dist_azi()
plt.subplot(g[1, 1], polar=True)
self.plot_sta_azi()
plt.subplot(g[1, 2], polar=True)
self.plot_win_azi()
ax = plt.subplot(g[0, 2])
self.plot_si_bb(ax, self.cmtsource)
ax = plt.subplot(g[2, 2])
self.plot_si_bb_comp(ax, self.new_cmtsource, self.cmtsource,
"Inversion")
plt.subplot(g[2, :-1])
self.plot_table()
if figurename is None:
plt.show()
else:
plt.savefig(figurename)
| lgpl-3.0 |
garvitr/sympy | sympy/plotting/tests/test_plot.py | 43 | 8577 | from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        for fname in cls.tmp_files:
            os.remove(fname)
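# A hedged sketch (added) of the context-manager variant suggested in the
# XXX note above; it is not used by the tests below, and the class name is
# an assumption rather than part of the original test suite.
class _TmpFileContext(object):
    def __init__(self):
        self.tmp_files = []
    def tmp_file(self, name=''):
        self.tmp_files.append(
            NamedTemporaryFile(prefix=name, suffix='.png').name)
        return self.tmp_files[-1]
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        for fname in self.tmp_files:
            os.remove(fname)
        return False  # do not suppress exceptions raised inside the block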
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
raises(ValueError, lambda: plot(x, y))
p = plot(Piecewise((1, x > 0), (0, True)),(x,-1,1))
p.save(tmp_file('%s_plot_piecewise' % name))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
| bsd-3-clause |
carrillo/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
xya/sms-tools | lectures/06-Harmonic-model/plots-code/harmonic-inharmonic-sines.py | 24 | 2256 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
plt.figure(1, figsize=(9, 7))
plt.subplot(211)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vibraphone-C6.wav'))
w = np.blackman(401)
N = 512
H = 100
t = -100
minSineDur = .02
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.01
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 10000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vibraphone-C6.wav)')
plt.subplot(212)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vignesh.wav'))
w = np.blackman(1101)
N = 2048
H = 250
t = -90
minSineDur = .1
maxnSines = 200
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 5000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vignesh.wav)')
plt.tight_layout()
plt.savefig('harmonic-inharmonic-sines.png')
plt.show() | agpl-3.0 |
akhilaananthram/nupic.research | vehicle-control/agent/run_q.py | 2 | 5501 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from sensorimotor.encoders.one_d_depth import OneDDepthEncoder
from sensorimotor.q_learner import QLearner
ACTIONS = ["-1", "0", "1"]
class Agent(object):
  def __init__(self, positions):
self.encoder = OneDDepthEncoder(positions=positions,
radius=5,
wrapAround=True,
nPerPosition=28,
wPerPosition=3,
minVal=0,
maxVal=1)
self.plotter = Plotter(self.encoder)
    self.learner = QLearner(ACTIONS, n=1008)
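    # Note (added): n=1008 appears to match the encoder output width,
    # len(positions) * nPerPosition = 36 * 28, for the positions defined in
    # __main__ below; this is an observation, not documented in the original.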
self.lastState = None
self.lastAction = None
def sync(self, outputData):
if not ("ForwardsSweepSensor" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
if outputData.get("reset"):
print "Reset."
sensor = outputData["ForwardsSweepSensor"]
steer = outputData["steer"]
reward = outputData.get("reward") or 0
encoding = self.encoder.encode(numpy.array(sensor))
if self.lastState is not None:
self.learner.update(self.lastState, str(self.lastAction),
encoding, str(steer), reward)
value = self.learner.value(encoding)
qValues = {}
    for action in ACTIONS:
qValues[action] = self.learner.qValue(encoding, action)
inputData = {}
inputData["qValues"] = qValues
inputData["bestAction"] = self.learner.bestAction(encoding)
self.plotter.update(sensor, encoding, steer, reward, value, qValues)
if outputData.get("reset"):
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
return inputData
class Plotter(object):
def __init__(self, encoder):
self.encoder = encoder
self.sensor = []
self.encoding = []
self.steer = []
self.reward = []
self.value = []
self.qValues = defaultdict(lambda: [])
self.bestAction = []
import matplotlib.pyplot as plt
self.plt = plt
import matplotlib.cm as cm
self.cm = cm
from pylab import rcParams
rcParams.update({'figure.figsize': (6, 9)})
# rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
def update(self, sensor, encoding, steer, reward, value, qValues):
self.sensor.append(sensor)
self.encoding.append(encoding)
self.steer.append(steer)
self.reward.append(reward)
self.value.append(value)
for key, value in qValues.iteritems():
self.qValues[key].append(value)
bestAction = int(max(qValues.iteritems(), key=operator.itemgetter(1))[0])
self.bestAction.append(bestAction)
def render(self):
self.plt.figure(1)
self.plt.clf()
n = 7
self.plt.subplot(n,1,1)
self._plot(self.steer, "Steer over time")
self.plt.subplot(n,1,2)
self._plot(self.reward, "Reward over time")
self.plt.subplot(n,1,3)
self._plot(self.value, "Value over time")
self.plt.subplot(n,1,4)
shape = len(self.encoder.positions), self.encoder.scalarEncoder.getWidth()
encoding = numpy.array(self.encoding[-1]).reshape(shape).transpose()
self._imshow(encoding, "Encoding at time t")
self.plt.subplot(n,1,5)
data = self.encoding
w = self.encoder.w
overlaps = [sum(a & b) / float(w) for a, b in zip(data[:-1], data[1:])]
self._plot(overlaps, "Encoding overlaps between consecutive times")
    # for i, action in enumerate(ACTIONS):
# self.plt.subplot(n,1,4+i)
# self._plot(self.qValues[action], "Q value: {0}".format(action))
# self.plt.subplot(n,1,7)
# self._plot(self.bestAction, "Best action")
self.plt.draw()
self.plt.savefig("q-{0}.png".format(time.time()))
def _plot(self, data, title):
self.plt.title(title)
self.plt.xlim(0, len(data))
self.plt.plot(range(len(data)), data)
def _imshow(self, data, title):
self.plt.title(title)
self.plt.imshow(data,
cmap=self.cm.Greys,
interpolation="nearest",
aspect='auto',
vmin=0,
vmax=1)
if __name__ == "__main__":
# complete uniform
# positions = [i*20 for i in range(36)]
# forward uniform
positions = [i*10 for i in range(-18, 18)]
agent = Agent(positions)
Server(agent)
| gpl-3.0 |
PatrickOReilly/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset positions of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) of each box, align the boxes
    with *align* and calculate the y-offsets of each box.
    *hd_list* : list of (height, ydescent) of boxes to be aligned.
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. Its child artists are meant
    to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
Return with, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extent of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extent of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2*self.pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return with, height, xdescent, ydescent of box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0,0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
                              adjusted so that it is (approximately)
                              center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
Set multilinebaseline .
If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
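# Hedged usage sketch (added, not part of the original module): pack a
# TextArea next to a DrawingArea with an HPacker and draw the result at a
# fixed display offset. The figure/axes setup below is an assumption about a
# typical interactive matplotlib session, not something this module requires.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle
    fig = plt.figure()
    ax = fig.add_subplot(111)
    label = TextArea('a circle:', textprops=dict(color='k'))
    canvas = DrawingArea(30, 30, 0, 0)
    canvas.add_artist(Circle((15, 15), 10, fc='r'))
    packed = HPacker(children=[label, canvas], align='center', pad=5, sep=5)
    packed.set_figure(fig)
    packed.set_offset((100, 100))  # offset given in display (pixel) units
    ax.add_artist(packed)
    plt.show()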
| gpl-3.0 |
juanitopereza/Metodos_vacaciones | Lab/electron/electron.py | 1 | 2329 | # coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
#%%
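# Note (added): f and g below are the x- and y-components of the electron's
# acceleration, a = (q/m) * (E + v x B), for a uniform field B = (0, 0, B_z)
# plus the radial field of an infinite line charge on the z axis,
# E = Lambda * (x, y) / (2*pi*epsilon_0*(x**2 + y**2)); q_m stands for q/m.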
def f(x,y,yp):
return q_m * B_z * yp + (q_m * Lambda * x)/(2 * np.pi * epsilon_0 * (x**2 + y**2))
def g(x,y,xp):
return -q_m * B_z * xp + (q_m * Lambda * y)/(2 * np.pi * epsilon_0 * (x**2 + y**2))
def RK_step(delta_t, x_old, y_old, xp_old, yp_old):
k_x1 = xp_old * delta_t
k_y1 = yp_old * delta_t
k_xp1 = f(x_old, y_old, yp_old) * delta_t
k_yp1 = g(x_old, y_old, xp_old) * delta_t
#print (k_x1, k_y1, k_xp1, k_yp1)
k_x2 = (xp_old + k_xp1/2.0) * delta_t
k_y2 = (yp_old + k_yp1/2.0) * delta_t
k_xp2 = f(x_old + k_x1/2.0, y_old + k_y1/2.0, yp_old + k_yp1/2.0) * delta_t
k_yp2 = g(x_old + k_x1/2.0, y_old + k_y1/2.0, xp_old + k_xp1/2.0) * delta_t
#print (k_x2, k_y2, k_xp2, k_yp2)
k_x3 = (xp_old + k_xp2/2.0) * delta_t
k_y3 = (yp_old + k_yp2/2.0) * delta_t
k_xp3 = f(x_old + k_x2/2.0, y_old + k_y2/2.0, yp_old + k_yp2/2.0) * delta_t
k_yp3 = g(x_old + k_x2/2.0, y_old + k_y2/2.0, xp_old + k_xp2/2.0) * delta_t
#print (k_x3, k_y3, k_xp3, k_yp3)
k_x4 = (xp_old + k_xp3) * delta_t
k_y4 = (yp_old + k_yp3) * delta_t
k_xp4 = f(x_old + k_x3, y_old + k_y3, yp_old + k_yp3) * delta_t
k_yp4 = g(x_old + k_x3, y_old + k_y3, xp_old + k_xp3) * delta_t
#print (k_x4, k_y4, k_xp4, k_yp4)
x_new = x_old + (1.0/6.0)*(k_x1 + 2.0*k_x2 + 2.0*k_x3 + k_x4)
y_new = y_old + (1.0/6.0)*(k_y1 + 2.0*k_y2 + 2.0*k_y3 + k_y4)
xp_new = xp_old + (1.0/6.0)*(k_xp1 + 2.0*k_xp2 + 2.0*k_xp3 + k_xp4)
yp_new = yp_old + (1.0/6.0)*(k_yp1 + 2.0*k_yp2 + 2.0*k_yp3 + k_yp4)
return x_new, y_new, xp_new, yp_new
#%%
q_m = -1.76e11
Lambda = -1e-12
B_z = 1e-5
epsilon_0 = 9e-12
N_points = 2000
t_final = 2e-5
delta_t = t_final/N_points
t = np.zeros(N_points)
X = np.zeros(N_points)
Y = np.zeros(N_points)
XP = np.zeros(N_points)
YP = np.zeros(N_points)
t[0] = 0.0
X[0] = 2.0
Y[0] = 2.0
XP[0] = 0.0
YP[0] =-3000.0
for i in range(1,N_points):
X[i], Y[i], XP[i], YP[i] = RK_step(delta_t, X[i-1], Y[i-1], XP[i-1], YP[i-1])
t[i] = t[i-1] + delta_t
#print (t[i], X[i], Y[i], XP[i], YP[i])
#%%
plt.plot(X,Y, label= "$RK4$")
plt.legend()
plt.xlabel("$x\ [m]$")
plt.ylabel("$y\ [m]$")
plt.title(u"$Trayectoria\ electrón$")
plt.savefig("electron.pdf")
#plt.show()
| mit |
glouppe/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
piskuliche/Python-Code-Collection | CP2K_Util/stupid_pmf/calculate_pmf.py | 1 | 1704 | import numpy as np
import itertools
import matplotlib.pyplot as plt
class distances:
def __init__(self,nframes):
self.r = np.zeros((3,2,nframes))
self.dr = np.zeros((3,nframes))
self.drmag = np.zeros(nframes)
def read_frames(self, nframes, infile):
rx, ry, rz = np.genfromtxt(infile, usecols = (1,2,3), unpack=True,max_rows=nframes*2)
self.r[0,0] = rx[::2]
self.r[1,0] = ry[::2]
self.r[2,0] = rz[::2]
self.r[0,1] = rx[1::2]
self.r[1,1] = ry[1::2]
self.r[2,1] = rz[1::2]
def calc_dist(self,nframes, L):
for frame in range(nframes):
self.dr[0,frame]=self.r[0,0,frame]-self.r[0,1,frame]
self.dr[1,frame]=self.r[1,0,frame]-self.r[1,1,frame]
self.dr[2,frame]=self.r[2,0,frame]-self.r[2,1,frame]
self.dr[0,frame]=self.dr[0,frame]-L*int(round(self.dr[0,frame]/L))
self.dr[1,frame]=self.dr[1,frame]-L*int(round(self.dr[1,frame]/L))
self.dr[2,frame]=self.dr[2,frame]-L*int(round(self.dr[2,frame]/L))
self.drmag[frame] = np.sqrt(self.dr[0,frame]**2 + self.dr[1,frame]**2 + self.dr[2,frame]**2)
print frame, self.drmag[frame], self.dr[0,frame], self.dr[1,frame], self.dr[2,frame]
def bin_dist(self,nframes, L):
dr = 0.1
rcut = L/2.0
self.hist = np.histogram(self.drmag,range=(0.0,rcut), bins = int(rcut)*100,density=True)
np.savetxt('out.hist', np.c_[self.hist[1][1:], self.hist[0]*(L**3/(4*np.pi*self.hist[1][1:]**2))])
nframes = 100001
infile = 'lif.xyz'
v = distances(nframes)
L = 14.219066
v.read_frames(nframes,infile)
v.calc_dist(nframes,L)
v.bin_dist(nframes, L)
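# Added sketch (not in the original script): turn the saved g(r) histogram
# into a potential of mean force, W(r) = -kB*T*ln g(r). The temperature and
# unit system below are assumptions, not values read from the simulation.
kB = 0.0019872041   # Boltzmann constant in kcal/(mol K) (assumed units)
T = 298.15          # assumed temperature in K
r_vals, g_r = np.loadtxt('out.hist', unpack=True)
mask = g_r > 0.0    # avoid log(0) in empty bins
pmf = -kB * T * np.log(g_r[mask])
np.savetxt('out.pmf', np.c_[r_vals[mask], pmf])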
| gpl-3.0 |
waltervh/BornAgain-tutorial | old/bornagain-python/fitting/advanced-tutorial/step3_basicfit.py | 2 | 3341 | """
Fitting example: 4 parameters fit with simple output. Example explains:
* How to define fit parameters (start values, limits, steps)
* How to invoke drawing of fit progress
* How to change another minimizer, and set its properties
"""
import bornagain as ba
from bornagain import deg, angstrom, nm, AttLimits
from matplotlib import pyplot as plt
def get_sample():
"""
Returns a sample with uncorrelated cylinders and prisms on a substrate.
"""
# defining materials
m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0)
m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8)
m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles
cylinder_ff = ba.FormFactorCylinder(1.0*nm, 1.0*nm)
cylinder = ba.Particle(m_particle, cylinder_ff)
prism_ff = ba.FormFactorPrism3(1.0*nm, 1.0*nm)
prism = ba.Particle(m_particle, prism_ff)
particle_layout = ba.ParticleLayout()
particle_layout.addParticle(cylinder, 0.5)
particle_layout.addParticle(prism, 0.5)
interference = ba.InterferenceFunctionNone()
particle_layout.addInterferenceFunction(interference)
# air layer with particles and substrate form multi layer
air_layer = ba.Layer(m_air)
air_layer.addLayout(particle_layout)
substrate_layer = ba.Layer(m_substrate, 0)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(air_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_simulation():
"""
Returns a GISAXS simulation with beam and detector defined
"""
simulation = ba.GISASSimulation()
simulation.setDetectorParameters(100, -1.0*deg, 1.0*deg,
100, 0.0*deg, 2.0*deg)
simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg)
return simulation
def run_fitting():
"""
run fitting
"""
sample = get_sample()
simulation = get_simulation()
simulation.setSample(sample)
real_data = ba.IntensityDataIOFactory.readIntensityData(
'refdata_fitcylinderprisms.int.gz')
fit_suite = ba.FitSuite()
fit_suite.addSimulationAndRealData(simulation, real_data)
fit_suite.initPrint(10)
# setting fitting parameters with starting values and limits
fit_suite.addFitParameter("*Cylinder/Height", 4.*nm).setLowerLimited(0.01).setStep(0.02)
fit_suite.addFitParameter("*Cylinder/Radius", 6.*nm).setLimited(3.0, 8.0)
fit_suite.addFitParameter("*Prism3/Height", 4.*nm).setUpperLimited(10.0)
fit_suite.addFitParameter("*Prism3/BaseEdge", 5.*nm).setFixed()
# > Changing minimization algorithm
# catalogue = ba.MinimizerCatalogue()
# print(catalogue.toString())
# fit_suite.setMinimizer("Minuit2", "Migrad") # ba.Default
# fit_suite.setMinimizer("Minuit2", "Fumili")
# fit_suite.setMinimizer("Minuit2", "Fumili", "MaxFunctionCalls=20")
# fit_suite.setMinimizer("GSLLMA")
# > Drawing fit progress evey 10'th iteration
# draw_observer = ba.DefaultFitObserver(draw_every_nth=10)
# fit_suite.attachObserver(draw_observer)
# running fit
fit_suite.runFit()
print("Fitting completed.")
print("chi2:", fit_suite.getChi2())
for par in fit_suite.fitParameters():
print(par.name(), par.value(), par.error())
if __name__ == '__main__':
run_fitting()
plt.show()
| gpl-3.0 |
sangwook236/SWDT | sw_dev/python/rnd/test/language_processing/aru_net_test.py | 2 | 4071 | #!/usr/bin/env python
from __future__ import print_function, division
import os, time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import imageio
from PIL import Image
from pix_lab.util.util import load_graph, read_image_list
#from pix_lab.util.inference_pb import Inference_pb
import cv2
def load_image(path, scale, mode):
aImg = imageio.imread(path, pilmode=mode)
sImg = np.array(Image.fromarray(aImg).resize((round(aImg.shape[1] * scale), round(aImg.shape[0] * scale)), resample=Image.BICUBIC))
fImg = sImg
if len(sImg.shape) == 2:
fImg = np.expand_dims(fImg, 2)
fImg = np.expand_dims(fImg, 0)
return fImg
# REF [file] >> ${ARU-Net_HOME}/pix_lab/util/inference_pb.py
def inference(path_to_pb, img_list, scale=1.0, mode='L', print_result=True, gpu_device='0'):
graph = load_graph(path_to_pb)
val_size = len(img_list)
if val_size is None:
print('No Inference Data available. Skip Inference.')
return
output_dir_path = './prediction'
os.makedirs(output_dir_path, exist_ok=False)
session_conf = tf.ConfigProto()
session_conf.gpu_options.visible_device_list = gpu_device
with tf.Session(graph=graph, config=session_conf) as sess:
x = graph.get_tensor_by_name('inImg:0')
predictor = graph.get_tensor_by_name('output:0')
print('Start Inference...')
timeSum = 0.0
for step in range(0, val_size):
aTime = time.time()
aImgPath = img_list[step]
print('Image: {:} '.format(aImgPath))
batch_x = load_image(aImgPath, scale, mode)
print('Resolution: h {:}, w {:} '.format(batch_x.shape[1],batch_x.shape[2]))
# Run validation.
aPred = sess.run(predictor, feed_dict={x: batch_x})
curTime = (time.time() - aTime) * 1000.0
timeSum += curTime
print('Update time: {:.2f} ms'.format(curTime))
if print_result:
n_class = aPred.shape[3]
channels = batch_x.shape[3]
"""
fig = plt.figure()
for aI in range(0, n_class+1):
if aI == 0:
a = fig.add_subplot(1, n_class+1, 1)
if channels == 1:
plt.imshow(batch_x[0, :, :, 0], cmap=plt.cm.gray)
else:
plt.imshow(batch_x[0, :, :, :])
a.set_title('input')
else:
a = fig.add_subplot(1, n_class+1, aI+1)
plt.imshow(aPred[0,:, :,aI-1], cmap=plt.cm.gray, vmin=0.0, vmax=1.0)
#misc.imsave('out' + str(aI) + '.jpg', aPred[0,:, :,aI-1])
a.set_title('Channel: ' + str(aI-1))
print('To go on just CLOSE the current plot.')
plt.show()
"""
"""
for cls in range(0, n_class):
print('***', np.min(aPred[0,:, :,cls]), np.max(aPred[0,:, :,cls]))
pred = aPred[0,:, :,cls]
if cls < 2:
pred[pred > 0.5] = 1
else:
pred[pred < 0.5] = 0.0
cv2.imshow('Class ' + str(cls), pred)
"""
if 1 == channels:
rgb = cv2.cvtColor(batch_x[0, :, :, 0], cv2.COLOR_GRAY2BGR)
else:
rgb = batch_x[0, :, :, :]
cls = 0
pred = aPred[0,:, :,cls]
if 0 == cls:
rgb[pred > 0.1] = (0, 0, 255)
elif 1 == cls:
rgb[pred > 0.2] = (0, 0, 255)
else:
rgb[pred < 0.5] = (0, 0, 255)
cv2.imwrite(os.path.join(output_dir_path, 'prediction{}_{}.tif'.format(cls, step)), rgb)
#cv2.imshow('Prediction', pred)
#cv2.imshow('Overlay', rgb)
#cv2.waitKey(0)
print('Inference avg update time: {:.2f} ms'.format(timeSum / val_size))
print('Inference Finished!')
def main():
if 'posix' == os.name:
aru_net_dir_path = '/home/sangwook/lib_repo/python/ARU-Net_github'
else:
aru_net_dir_path = 'D:/lib_repo/python/rnd/ARU-Net_github'
path_to_pb = os.path.join(aru_net_dir_path, 'demo_nets/model100_ema.pb') # ${ARU-Net_HOME}/demo_nets/model100_ema.pb
#path_list_imgs = os.path.join(aru_net_dir_path, 'demo_images/imgs.lst') # ${ARU-Net_HOME}/demo_images/imgs.lst
path_list_imgs = './epapyrus_images.lst'
#path_list_imgs = './keit_images.lst'
img_list = read_image_list(path_list_imgs)
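    # Note (added): read_image_list() is assumed to expect a plain-text file
    # with one image path per line; the .lst files named above are not
    # provided alongside this script.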
inference(path_to_pb, img_list, scale=1.0, mode='L', print_result=True, gpu_device='0')
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-3.0 |
daodaoliang/neural-network-animation | matplotlib/colors.py | 10 | 57606 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of colors called
a colormap. Colormapping typically involves two steps: a data array is first
mapped onto the range 0-1 using an instance of :class:`Normalize` or of a
subclass; then this number in the 0-1 range is mapped to a color using an
instance of a subclass of :class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all the built-in
colormap instances, but is also useful for making custom colormaps, and
:class:`ListedColormap`, which is used for generating a custom colormap from a
list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single color
specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic built-in colors, you can use a single letter
- b: blue
- g: green
- r: red
- c: cyan
- m: magenta
- y: yellow
- k: black
- w: white
Gray shades can be given as a string encoding a float in the 0-1 range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify the
color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B* are in
the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse'
are supported.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import warnings
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
cnames = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksage': '#598556',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsage': '#BCECAC',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sage': '#87AE73',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
# add british equivs
for k, v in list(six.iteritems(cnames)):
if k.find('gray') >= 0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given an rgb or rgba sequence of 0-1 floats, return the hex string'
a = '#%02x%02x%02x' % tuple([int(np.round(val * 255)) for val in rgb[:3]])
return a
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, six.string_types):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16) / 255.0 for n in (s[1:3], s[3:5], s[5:7])])
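# A minimal usage sketch, assuming the hex2color and rgb2hex helpers above are
# in scope; the helper name _hex_roundtrip_example is illustrative only.
def _hex_roundtrip_example():
    rgb = hex2color('#efefef')          # -> (0.93725..., 0.93725..., 0.93725...)
    return rgb2hex(rgb)                 # -> '#efefef', recovering the input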
class ColorConverter(object):
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b': (0.0, 0.0, 1.0),
'g': (0.0, 0.5, 0.0),
'r': (1.0, 0.0, 0.0),
'c': (0.0, 0.75, 0.75),
'm': (0.75, 0, 0.75),
'y': (0.75, 0.75, 0),
'k': (0.0, 0.0, 0.0),
'w': (1.0, 1.0, 1.0), }
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a string representation of a float, like '0.4',
indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
# Gray must be given as a string ('0.4') so that sequences of 3 or 4 numbers are treated unambiguously as RGB or RGBA.
try:
return self.cache[arg]
except KeyError:
pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try:
return self.cache[arg]
except KeyError:
pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
argl = arg.lower()
color = self.colors.get(argl, None)
if color is None:
str1 = cnames.get(argl, argl)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(argl)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = (fl,)*3
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4' % len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
raise ValueError(
'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError) as exc:
raise ValueError(
'to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
In addition, if *arg* is "none" (case-insensitive),
then (0,0,0,0) will be returned.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if arg.lower() == 'none':
return (0.0, 0.0, 0.0, 0.0)
except AttributeError:
pass
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if any(float(x) < 0 or x > 1 for x in arg):
raise ValueError(
'number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], alpha
if len(arg) == 3:
r, g, b = arg
if any(float(x) < 0 or x > 1 for x in arg):
raise ValueError(
'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'length of rgba sequence should be either 3 or 4')
else:
r, g, b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r, g, b, alpha
except (TypeError, ValueError) as exc:
raise ValueError(
'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
nc = len(c)
except TypeError:
raise ValueError(
"Cannot convert argument type %s to rgba array" % type(c))
try:
if nc == 0 or c.lower() == 'none':
return np.zeros((0, 4), dtype=np.float)
except AttributeError:
pass
try:
# Single value? Put it in an array with a single row.
return np.array([self.to_rgba(c, alpha)], dtype=np.float)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):
if (c.ravel() > 1).any() or (c.ravel() < 0).any():
raise ValueError(
"number in rgba sequence is outside 0-1 range")
result = np.asarray(c, np.float)
if alpha is not None:
if alpha > 1 or alpha < 0:
raise ValueError("alpha must be in 0-1 range")
result[:, 3] = alpha
return result
# This alpha operation above is new, and depends
# on higher levels to refrain from setting alpha
# to values other than None unless there is
# intent to override any existing alpha values.
# It must be some other sequence of color specs.
result = np.zeros((nc, 4), dtype=np.float)
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha)
return result
colorConverter = ColorConverter()
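# A minimal sketch of the input forms accepted by to_rgb/to_rgba, assuming the
# colorConverter instance above; _color_conversion_examples is an illustrative name.
def _color_conversion_examples():
    return [
        colorConverter.to_rgb('r'),               # single letter -> (1.0, 0.0, 0.0)
        colorConverter.to_rgb('#00FFFF'),         # hex string    -> (0.0, 1.0, 1.0)
        colorConverter.to_rgb('0.5'),             # gray string   -> (0.5, 0.5, 0.5)
        colorConverter.to_rgba('r', alpha=0.25),  # rgb + alpha   -> (1.0, 0.0, 0.0, 0.25)
        colorConverter.to_rgba('none'),           # "no color"    -> (0.0, 0.0, 0.0, 0.0)
    ]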
def makeMappingArray(N, data, gamma=1.0):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y allow for
discontinuous mapping functions (as might be found in a
sawtooth), where y0 is the value of y for x values less than or
equal to the given x, and y1 is the value used for x values
greater than it. The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
Alternatively, data can be a function mapping values between 0 - 1
to 0 - 1.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
if six.callable(data):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=np.float), 0, 1)
return lut
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x) - x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N - 1)
lut = np.zeros((N,), np.float)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = (((xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])) *
(y0[ind] - y1[ind - 1]) + y1[ind - 1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
np.clip(lut, 0.0, 1.0, out=lut)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
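# A worked sketch of makeMappingArray, assuming numpy is imported as np in this
# module: a single linear segment from (x=0, y=0) to (x=1, y=1) produces an
# evenly spaced ramp. The helper name is illustrative only.
def _mapping_array_example():
    data = [(0.0, 0.0, 0.0),
            (1.0, 1.0, 1.0)]
    return makeMappingArray(5, data)    # -> array([0.0, 0.25, 0.5, 0.75, 1.0])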
class Colormap(object):
"""
Baseclass for all scalar to RGBA mappings.
Typically Colormap instances are used to convert data values (floats) from
the interval ``[0, 1]`` to the RGBA color that the respective Colormap
represents. For scaling of data into the ``[0, 1]`` interval see
:class:`matplotlib.colors.Normalize`. It is worth noting that
:class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this
``data->normalize->map-to-color`` processing chain.
"""
def __init__(self, name, N=256):
r"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of rgb quantization levels.
"""
self.name = name
self.N = int(N) # ensure that N is always int
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = self.N
self._i_over = self.N + 1
self._i_bad = self.N + 2
self._isinit = False
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: :class:`matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
"""
Parameters
----------
X : scalar, ndarray
The data value(s) to convert to RGBA.
For floats, X should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, X should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float, None
Alpha must be a scalar between 0 and 1, or None.
bytes : bool
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be uint8s in the interval
``[0, 255]``.
Returns
-------
Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
# See class docstring for arg/kwarg documentation.
if not self._isinit:
self._init()
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.array(X, copy=True) # Copy here to avoid side effects.
mask_bad = xma.mask # Mask will be used below.
xa = xma.filled() # Fill to avoid infs, etc.
del xma
# Calculations with native byteorder are faster, and avoid a
# bug that otherwise can occur with putmask when the last
# argument is a numpy scalar.
if not xa.dtype.isnative:
xa = xa.byteswap().newbyteorder()
if xa.dtype.kind == "f":
# Treat 1.0 as slightly less than 1.
vals = np.array([1, 0], dtype=xa.dtype)
almost_one = np.nextafter(*vals)
cbook._putmask(xa, xa == 1.0, almost_one)
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
xa *= self.N
np.clip(xa, -1, self.N, out=xa)
# ensure that all 'under' values will still have negative
# value after casting to int
cbook._putmask(xa, xa < 0.0, -1)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
cbook._putmask(xa, xa > self.N - 1, self._i_over)
cbook._putmask(xa, xa < 0, self._i_under)
if mask_bad is not None:
if mask_bad.shape == xa.shape:
cbook._putmask(xa, mask_bad, self._i_bad)
elif mask_bad:
xa.fill(self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut.copy() # Don't let alpha modify original _lut.
if alpha is not None:
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
if bytes:
alpha = int(alpha * 255)
if (lut[-1] == 0).all():
lut[:-1, -1] = alpha
# All zeros is taken as a flag for the default bad
# color, which is no color--fully transparent. We
# don't want to override this.
else:
lut[:, -1] = alpha
# If the bad value is set to have a color, then we
# override its alpha just as for any other value.
rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0, :])
return rgba
def set_bad(self, color='k', alpha=None):
"""Set color to be used for masked values.
"""
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_under(self, color='k', alpha=None):
"""Set color to be used for low out-of-range values.
Requires norm.clip = False
"""
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_over(self, color='k', alpha=None):
"""Set color to be used for high out-of-range values.
Requires norm.clip = False
"""
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def _init(self):
"""Generate the lookup table, self._lut"""
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit:
self._init()
return (np.alltrue(self._lut[:, 0] == self._lut[:, 1]) and
np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256, gamma=1.0):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table. Entries for alpha are optional.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:meth:`LinearSegmentedColormap.from_list`
Static method; factory function for generating a
smoothly-varying LinearSegmentedColormap.
:func:`makeMappingArray`
For information about making a mapping array.
"""
# True only if all colors in map are identical; needed for contouring.
self.monochrome = False
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
self._gamma = gamma
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(
self.N, self._segmentdata['red'], self._gamma)
self._lut[:-3, 1] = makeMappingArray(
self.N, self._segmentdata['green'], self._gamma)
self._lut[:-3, 2] = makeMappingArray(
self.N, self._segmentdata['blue'], self._gamma)
if 'alpha' in self._segmentdata:
self._lut[:-3, 3] = makeMappingArray(
self.N, self._segmentdata['alpha'], 1)
self._isinit = True
self._set_extremes()
def set_gamma(self, gamma):
"""
Set a new gamma value and regenerate color map.
"""
self._gamma = gamma
self._init()
@staticmethod
def from_list(name, colors, N=256, gamma=1.0):
"""
Make a linear segmented colormap with *name* from a sequence
of *colors* which evenly transitions from colors[0] at val=0
to colors[-1] at val=1. *N* is the number of rgb quantization
levels.
Alternatively, a list of (value, color) tuples can be given
to divide the range unevenly.
"""
if not cbook.iterable(colors):
raise ValueError('colors must be iterable')
if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \
not cbook.is_string_like(colors[0]):
# List of value, color pairs
vals, colors = list(zip(*colors))
else:
vals = np.linspace(0., 1., len(colors))
cdict = dict(red=[], green=[], blue=[], alpha=[])
for val, color in zip(vals, colors):
r, g, b, a = colorConverter.to_rgba(color)
cdict['red'].append((val, r, r))
cdict['green'].append((val, g, g))
cdict['blue'].append((val, b, b))
cdict['alpha'].append((val, a, a))
return LinearSegmentedColormap(name, cdict, N, gamma)
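# A minimal sketch of both construction paths described above; the colormap
# names and anchor colors are illustrative choices, not part of the API.
def _linear_segmented_examples():
    # Explicit segmentdata: red ramps up over the lower half, blue over the upper half.
    cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
             'green': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],
             'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    cmap1 = LinearSegmentedColormap('red_blue_sketch', cdict, N=64)
    # from_list: evenly spaced anchors interpolated between named colors.
    cmap2 = LinearSegmentedColormap.from_list('warm_sketch',
                                              ['darkred', 'orange', 'gold'], N=64)
    return cmap1(0.75), cmap2(0.0)      # RGBA tuples for scalar inputs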
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name='from_list', N=None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 or Nx4 floating point array
(*N* rgb or rgba values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are
# identical; needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try:
gray = float(self.colors)
except TypeError:
pass
else:
self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgba = colorConverter.to_rgba_array(self.colors)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3] = rgba
self._isinit = True
self._set_extremes()
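# A minimal sketch of ListedColormap, assuming the class above: integer inputs
# index the listed colors directly, as used for categorical data.
def _listed_colormap_example():
    cmap = ListedColormap(['red', 'green', 'blue'], name='rgb_sketch')
    return cmap(0), cmap(1), cmap(2)    # the three listed colors as RGBA tuples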
class Normalize(object):
"""
A class which, when called, can normalize data into
the ``[0.0, 1.0]`` interval.
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
@staticmethod
def process_value(value):
"""
Homogenize the input *value* for easy and efficient normalization.
*value* can be a scalar or sequence.
Returns *result*, *is_scalar*, where *result* is a
masked array matching *value*. Float dtypes are preserved;
integer types with two bytes or smaller are converted to
np.float32, and larger types are converted to np.float.
Preserving float32 when possible, and using in-place operations,
can greatly improve speed for large arrays.
Experimental; we may want to add an option to force the
use of float32.
"""
if cbook.iterable(value):
is_scalar = False
result = ma.asarray(value)
if result.dtype.kind == 'f':
if isinstance(value, np.ndarray):
result = result.copy()
elif result.dtype.itemsize > 2:
result = result.astype(np.float)
else:
result = result.astype(np.float32)
else:
is_scalar = True
result = ma.array([value]).astype(np.float)
return result, is_scalar
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
resdat = result.data
resdat -= vmin
resdat /= (vmax - vmin)
result = np.ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin = float(self.vmin)
vmax = float(self.vmax)
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
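# A minimal sketch of Normalize: data is mapped linearly from [vmin, vmax]
# onto [0, 1], and inverse() maps back. The helper name is illustrative only.
def _normalize_example():
    norm = Normalize(vmin=0.0, vmax=10.0)
    assert norm(5.0) == 0.5             # the midpoint maps to 0.5
    assert norm.inverse(0.5) == 5.0     # the inverse recovers the data value
    return norm([0.0, 2.5, 10.0])       # -> masked array([0.0, 0.25, 1.0])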
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
result = ma.masked_less_equal(result, 0, copy=False)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= resdat <= 0
cbook._putmask(resdat, mask, 1)
np.log(resdat, resdat)
resdat -= np.log(vmin)
resdat /= (np.log(vmax) - np.log(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax / vmin), val)
else:
return vmin * pow((vmax / vmin), value)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
A = ma.masked_less_equal(A, 0, copy=False)
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is not None and self.vmax is not None:
return
A = ma.masked_less_equal(A, 0, copy=False)
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
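# A minimal sketch of LogNorm, assuming strictly positive data: a value v is
# placed at log(v / vmin) / log(vmax / vmin), so 10 sits halfway between 1 and 100.
def _lognorm_example():
    norm = LogNorm(vmin=1.0, vmax=100.0)
    half = norm(10.0)                   # -> 0.5
    back = norm.inverse(0.5)            # -> 10.0, i.e. vmin * (vmax / vmin) ** 0.5
    return half, back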
class SymLogNorm(Normalize):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the logarithm of values close to zero tends toward negative
infinity, there is a need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
def __init__(self, linthresh, linscale=1.0,
vmin=None, vmax=None, clip=False):
"""
*linthresh*:
The range within which the plot is linear (to
avoid having the plot go to infinity around zero).
*linscale*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range. Defaults to 1.
"""
Normalize.__init__(self, vmin, vmax, clip)
self.linthresh = float(linthresh)
self._linscale_adj = (linscale / (1.0 - np.e ** -1))
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = self._transform(result.data)
resdat -= self._lower
resdat /= (self._upper - self._lower)
if is_scalar:
result = result[0]
return result
def _transform(self, a):
"""
Inplace transformation.
"""
masked = np.abs(a) > self.linthresh
sign = np.sign(a[masked])
log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))
log *= sign * self.linthresh
a[masked] = log
a[~masked] *= self._linscale_adj
return a
def _inv_transform(self, a):
"""
Inverse inplace Transformation.
"""
masked = np.abs(a) > (self.linthresh * self._linscale_adj)
sign = np.sign(a[masked])
exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)
exp *= sign * self.linthresh
a[masked] = exp
a[~masked] /= self._linscale_adj
return a
def _transform_vmin_vmax(self):
"""
Calculates vmin and vmax in the transformed system.
"""
vmin, vmax = self.vmin, self.vmax
arr = np.array([vmax, vmin]).astype(np.float)
self._upper, self._lower = self._transform(arr)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
val = ma.asarray(value)
val = val * (self._upper - self._lower) + self._lower
return self._inv_transform(val)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
self._transform_vmin_vmax()
def autoscale_None(self, A):
""" autoscale only None-valued vmin or vmax """
if self.vmin is not None and self.vmax is not None:
pass
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
self._transform_vmin_vmax()
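# A minimal sketch of SymLogNorm, assuming numpy is imported as np: data that
# spans zero stays linear inside (-linthresh, linthresh) and is compressed
# logarithmically outside it, then rescaled to [0, 1].
def _symlognorm_example():
    norm = SymLogNorm(linthresh=1.0, linscale=1.0, vmin=-100.0, vmax=100.0)
    values = np.array([-100.0, -1.0, 0.0, 1.0, 100.0])
    return norm(values)                 # symmetric about 0.5, which is where 0.0 lands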
class PowerNorm(Normalize):
"""
Normalize a given value to the ``[0, 1]`` interval with a power-law
scaling. This will clip any negative data points to 0.
"""
def __init__(self, gamma, vmin=None, vmax=None, clip=False):
Normalize.__init__(self, vmin, vmax, clip)
self.gamma = gamma
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
res_mask = result.data < 0
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
resdat = result.data
resdat -= vmin
np.power(resdat, gamma, resdat)
resdat /= (vmax - vmin) ** gamma
result = np.ma.array(resdat, mask=result.mask, copy=False)
result[res_mask] = 0
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return ma.power(val, 1. / gamma) * (vmax - vmin) + vmin
else:
return pow(value, 1. / gamma) * (vmax - vmin) + vmin
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
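# A minimal sketch of PowerNorm: values are mapped through
# ((v - vmin) / (vmax - vmin)) ** gamma, so gamma=2 compresses the lower half.
def _powernorm_example():
    norm = PowerNorm(gamma=2.0, vmin=0.0, vmax=10.0)
    return norm(5.0)                    # -> 0.25, since (5 / 10) ** 2 == 0.25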
class BoundaryNorm(Normalize):
"""
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
"""
def __init__(self, boundaries, ncolors, clip=False):
"""
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
"""
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N - 1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax + 1)
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx >= b] = i
if self._interp:
scalefac = float(self.Ncmap - 1) / (self.N - 2)
iret = (iret * scalefac).astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
raise ValueError("BoundaryNorm is not invertible")
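# A minimal sketch of BoundaryNorm, assuming numpy is imported as np: with
# boundaries [0, 1, 2, 4] and ncolors=3, each value maps to the index of the
# interval that contains it; values at or above the last boundary map to ncolors.
def _boundarynorm_example():
    norm = BoundaryNorm([0, 1, 2, 4], ncolors=3)
    return norm(np.array([0.5, 1.5, 3.0, 5.0]))   # -> [0, 1, 2, 3]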
class NoNorm(Normalize):
"""
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
"""
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = cbook.deprecated('1.3', alternative='Normalize',
name='normalize',
obj_type='class alias')(Normalize)
no_norm = cbook.deprecated('1.3', alternative='NoNorm',
name='no_norm',
obj_type='class alias')(NoNorm)
def rgb_to_hsv(arr):
"""
convert float rgb values (in the range [0, 1]), in a numpy array, to hsv
values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
hsv : (..., 3) ndarray
Colors converted to hsv values in range [0, 1]
"""
# make sure it is an ndarray
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=arr.shape))
in_ndim = arr.ndim
if arr.ndim == 1:
arr = np.array(arr, ndmin=2)
# make sure we don't have an int image
if arr.dtype.kind in ('iu'):
arr = arr.astype(np.float32)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
if in_ndim == 1:
out.shape = (3,)
return out
def hsv_to_rgb(hsv):
"""
convert hsv values in a numpy array to rgb values
all values assumed to be in range [0, 1]
Parameters
----------
hsv : (..., 3) array-like
All values assumed to be in range [0, 1]
Returns
-------
rgb : (..., 3) ndarray
Colors converted to RGB values in range [0, 1]
"""
# make sure it is an ndarray
hsv = np.asarray(hsv)
# check length of the last dimension, should be _some_ sort of rgb
if hsv.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=hsv.shape))
# if we got passed a 1D array, try to treat as
# a single color and reshape as needed
in_ndim = hsv.ndim
if in_ndim == 1:
hsv = np.array(hsv, ndmin=2)
# make sure we don't have an int image
if hsv.dtype.kind in ('iu'):
hsv = hsv.astype(np.float32)
h = hsv[..., 0]
s = hsv[..., 1]
v = hsv[..., 2]
r = np.empty_like(h)
g = np.empty_like(h)
b = np.empty_like(h)
i = (h * 6.0).astype(np.int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
idx = i % 6 == 0
r[idx] = v[idx]
g[idx] = t[idx]
b[idx] = p[idx]
idx = i == 1
r[idx] = q[idx]
g[idx] = v[idx]
b[idx] = p[idx]
idx = i == 2
r[idx] = p[idx]
g[idx] = v[idx]
b[idx] = t[idx]
idx = i == 3
r[idx] = p[idx]
g[idx] = q[idx]
b[idx] = v[idx]
idx = i == 4
r[idx] = t[idx]
g[idx] = p[idx]
b[idx] = v[idx]
idx = i == 5
r[idx] = v[idx]
g[idx] = p[idx]
b[idx] = q[idx]
idx = s == 0
r[idx] = v[idx]
g[idx] = v[idx]
b[idx] = v[idx]
rgb = np.empty_like(hsv)
rgb[..., 0] = r
rgb[..., 1] = g
rgb[..., 2] = b
if in_ndim == 1:
rgb.shape = (3, )
return rgb
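# A minimal round-trip sketch, assuming numpy is imported as np: pure red in
# RGB converts to hue 0, full saturation, full value, and converts back unchanged.
def _rgb_hsv_roundtrip_example():
    red = np.array([1.0, 0.0, 0.0])
    hsv = rgb_to_hsv(red)               # -> array([0.0, 1.0, 1.0])
    return hsv_to_rgb(hsv)              # -> array([1.0, 0.0, 0.0])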
class LightSource(object):
"""
Create a light source coming from the specified azimuth and elevation.
Angles are in degrees, with the azimuth measured
clockwise from north and elevation up from the zero plane of the surface.
The :meth:`shade` method is used to produce rgb values for a shaded relief image
given a data array.
"""
def __init__(self, azdeg=315, altdeg=45,
hsv_min_val=0, hsv_max_val=1, hsv_min_sat=1,
hsv_max_sat=0):
"""
Specify the azimuth (measured clockwise from south) and altitude
(measured up from the plane of the surface) of the light source
in degrees.
The color of the resulting image will be darkened
by moving the (s,v) values (in hsv colorspace) toward
(hsv_min_sat, hsv_min_val) in the shaded regions, or
lightened by sliding (s,v) toward
(hsv_max_sat, hsv_max_val) in regions that are illuminated.
The default extremes are chosen so that completely shaded points
are nearly black (s = 1, v = 0) and completely illuminated points
are nearly white (s = 0, v = 1).
"""
self.azdeg = azdeg
self.altdeg = altdeg
self.hsv_min_val = hsv_min_val
self.hsv_max_val = hsv_max_val
self.hsv_min_sat = hsv_min_sat
self.hsv_max_sat = hsv_max_sat
def shade(self, data, cmap, norm=None):
"""
Take the input data array, convert to HSV values in the
given colormap, then adjust those color values
to give the impression of a shaded relief map with a
specified light source.
RGBA values are returned, which can then be used to
plot the shaded image with imshow.
"""
if norm is None:
norm = Normalize(vmin=data.min(), vmax=data.max())
rgb0 = cmap(norm(data))
rgb1 = self.shade_rgb(rgb0, elevation=data)
rgb0[:, :, 0:3] = rgb1
return rgb0
def shade_rgb(self, rgb, elevation, fraction=1.):
"""
Take the input RGB array (ny*nx*3) and adjust its color values
to give the impression of a shaded relief map with a
specified light source, using the elevation (ny*nx).
A new RGB array ((ny*nx*3)) is returned.
"""
# imagine an artificial sun placed at infinity in some azimuth and
# elevation position illuminating our surface. The parts of the
# surface that slope toward the sun should brighten while those sides
# facing away should become darker. convert alt, az to radians
az = self.azdeg * np.pi / 180.0
alt = self.altdeg * np.pi / 180.0
# gradient in x and y directions
dx, dy = np.gradient(elevation)
slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))
aspect = np.arctan2(dx, dy)
intensity = (np.sin(alt) * np.sin(slope) + np.cos(alt) *
np.cos(slope) * np.cos(-az - aspect - 0.5 * np.pi))
# rescale to interval -1,1
# +1 means maximum sun exposure and -1 means complete shade.
intensity = (intensity - intensity.min()) / \
(intensity.max() - intensity.min())
intensity = (2. * intensity - 1.) * fraction
# convert to rgb, then rgb to hsv
#rgb = cmap((data-data.min())/(data.max()-data.min()))
hsv = rgb_to_hsv(rgb[:, :, 0:3])
# modify hsv values to simulate illumination.
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity > 0),
((1. - intensity) * hsv[:, :, 1] +
intensity * self.hsv_max_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity > 0,
((1. - intensity) * hsv[:, :, 2] +
intensity * self.hsv_max_val),
hsv[:, :, 2])
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity < 0),
((1. + intensity) * hsv[:, :, 1] -
intensity * self.hsv_min_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity < 0,
((1. + intensity) * hsv[:, :, 2] -
intensity * self.hsv_min_val),
hsv[:, :, 2])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
# convert modified hsv back to rgb.
return hsv_to_rgb(hsv)
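# A minimal sketch of LightSource.shade on a synthetic elevation grid, assuming
# numpy as np and the colormap classes defined earlier in this module; the grid
# and colormap choices here are illustrative only.
def _lightsource_example():
    elevation = np.outer(np.linspace(0.0, 1.0, 64), np.linspace(0.0, 1.0, 64))
    cmap = LinearSegmentedColormap.from_list('terrain_sketch',
                                             ['darkgreen', 'khaki', 'white'])
    ls = LightSource(azdeg=315, altdeg=45)
    return ls.shade(elevation, cmap)    # -> (64, 64, 4) RGBA array, ready for imshow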
def from_levels_and_colors(levels, colors, extend='neither'):
"""
A helper routine to generate a cmap and a norm instance which
behave similarly to contourf's levels and colors arguments.
Parameters
----------
levels : sequence of numbers
The quantization levels used to construct the :class:`BoundaryNorm`.
Values ``v`` are quantized to level ``i`` if
``lev[i] <= v < lev[i+1]``.
colors : sequence of colors
The fill color to use for each level. If `extend` is "neither" there
must be ``n_level - 1`` colors. For an `extend` of "min" or "max" add
one extra color, and for an `extend` of "both" add two colors.
extend : {'neither', 'min', 'max', 'both'}, optional
The behaviour when a value falls out of range of the given levels.
See :func:`~matplotlib.pyplot.contourf` for details.
Returns
-------
(cmap, norm) : tuple containing a :class:`Colormap` and a \
:class:`Normalize` instance
"""
colors_i0 = 0
colors_i1 = None
if extend == 'both':
colors_i0 = 1
colors_i1 = -1
extra_colors = 2
elif extend == 'min':
colors_i0 = 1
extra_colors = 1
elif extend == 'max':
colors_i1 = -1
extra_colors = 1
elif extend == 'neither':
extra_colors = 0
else:
raise ValueError('Unexpected value for extend: {0!r}'.format(extend))
n_data_colors = len(levels) - 1
n_expected_colors = n_data_colors + extra_colors
if len(colors) != n_expected_colors:
raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'
' n_colors == {2!r}. Got {3!r}.'
''.format(extend, len(levels), n_expected_colors,
len(colors)))
cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)
if extend in ['min', 'both']:
cmap.set_under(colors[0])
else:
cmap.set_under('none')
if extend in ['max', 'both']:
cmap.set_over(colors[-1])
else:
cmap.set_over('none')
cmap.colorbar_extend = extend
norm = BoundaryNorm(levels, ncolors=n_data_colors)
return cmap, norm
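# A minimal sketch of from_levels_and_colors, assuming numpy is imported as np:
# three levels bound two intervals, and extend='both' adds one color each for
# the under- and over-range. The color choices are illustrative only.
def _levels_and_colors_example():
    cmap, norm = from_levels_and_colors([0.0, 1.0, 2.0],
                                        ['navy', 'lightblue', 'khaki', 'darkred'],
                                        extend='both')
    # 0.5 falls in the first interval, 1.5 in the second, 2.5 is over-range.
    return cmap(norm(np.array([0.5, 1.5, 2.5])))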
| mit |
jenshnielsen/basemap | examples/contour_demo.py | 4 | 4103 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import sys
# examples of filled contour plots on map projections.
# read in data on lat/lon grid.
hgt = np.loadtxt('500hgtdata.gz')
lons = np.loadtxt('500hgtlons.gz')
lats = np.loadtxt('500hgtlats.gz')
lons, lats = np.meshgrid(lons, lats)
# create new figure
fig=plt.figure()
# setup of sinusoidal basemap
m = Basemap(resolution='c',projection='sinu',lon_0=0)
# make a filled contour plot.
# create contour lines
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
# fill between contour lines.
CS2 =\
m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,30.)
m.drawmeridians(meridians)
plt.title('Sinusoidal Filled Contour Demo')
sys.stdout.write('plotting with sinusoidal basemap ...\n')
# create new figure
fig=plt.figure()
# setup of mollweide basemap
m = Basemap(resolution='c',projection='moll',lon_0=0)
# make a filled contour plot.
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
CS2 =\
m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,30.)
m.drawmeridians(meridians)
plt.title('Mollweide Filled Contour Demo')
sys.stdout.write('plotting with mollweide basemap ...\n')
# create new figure
fig=plt.figure()
# set up Robinson map projection.
m = Basemap(resolution='c',projection='robin',lon_0=0)
# make a filled contour plot.
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
CS2 = m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,60.)
m.drawmeridians(meridians,labels=[0,0,0,1])
plt.title('Robinson Filled Contour Demo')
sys.stdout.write('plotting with robinson basemap ...\n')
# create new figure
fig=plt.figure()
# set up map projection (azimuthal equidistant).
m = Basemap(projection='npaeqd',lon_0=-90,boundinglat=15.,resolution='c')
# make a filled contour plot.
x, y = m(lons, lats)
CS1 = m.contour(x,y,hgt,15,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,hgt,CS1.levels,cmap=plt.cm.jet,extend='both')
m.colorbar(CS2,pad='12%') # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(0.,80,20.)
m.drawparallels(parallels,labels=[0,0,1,1])
meridians = np.arange(10.,360.,20.)
m.drawmeridians(meridians,labels=[1,1,1,1])
plt.title('Azimuthal Equidistant Filled Contour Demo',y=1.075)
sys.stdout.write('plotting with azimuthal equidistant basemap ...\n')
# create new figure
fig=plt.figure()
# setup of orthographic basemap
m = Basemap(resolution='c',projection='ortho',\
lat_0=45.,lon_0=-120.)
# make a filled contour plot.
x, y = m(lons, lats)
CS1 = m.contour(x,y,hgt,15,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,hgt,CS1.levels,cmap=plt.cm.jet,extend='both')
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.fillcontinents()
m.drawmapboundary()
# draw parallels and meridians.
parallels = np.arange(-80.,90,20.)
m.drawparallels(parallels)
meridians = np.arange(-360.,360.,20.)
m.drawmeridians(meridians)
plt.title('Orthographic Filled Contour Demo')
sys.stdout.write('plotting with orthographic basemap ..\n')
plt.show()
| gpl-2.0 |
mayblue9/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
oliverlee/sympy | doc/ext/docscrape_sphinx.py | 51 | 9709 | from __future__ import division, absolute_import, print_function
import sys
import re
import inspect
import textwrap
import pydoc
import sphinx
import collections
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
# Lines that are commented out are used to make the
# autosummary:: table. Since SymPy does not use the
# autosummary:: functionality, it is easiest to just comment it
# out.
# autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
# if param_obj and (pydoc.getdoc(param_obj) or not desc):
# # Referenced object has a docstring
# autosum += [" %s%s" % (prefix, param)]
# else:
others.append((param, param_type, desc))
# if autosum:
# out += ['.. autosummary::']
# if self.class_members_toctree:
# out += [' :toctree:']
# out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', '', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns('Returns')
out += self._str_returns('Yields')
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |