| repo_name (string, 7-90 chars) | path (string, 4-191 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 762-838k chars) | license (15 classes) |
|---|---|---|---|---|---|
tridesclous/tridesclous | tridesclous/export.py | 1 | 4852 | import os
from collections import OrderedDict
import numpy as np
import scipy.io
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class GenericSpikeExporter:
def __call__(self,spikes, catalogue, seg_num, chan_grp, export_path,
split_by_cluster=False,
use_cell_label=True,
#~ use_index=True,
):
if not os.path.exists(export_path):
os.makedirs(export_path)
#~ print('export', spikes.size, seg_num, export_path)
#~ print('split_by_cluster', split_by_cluster, 'use_cell_label', use_cell_label)
clusters = catalogue['clusters']
spike_labels = spikes['cluster_label']
if use_cell_label:
spike_labels = spikes['cluster_label'].copy()
for l in clusters:
mask = spike_labels==l['cluster_label']
spike_labels[mask] = l['cell_label']
spike_indexes = spikes['index']
out_data = OrderedDict()
if split_by_cluster:
if use_cell_label:
possible_labels = np.unique(clusters['cell_label'])
label_name = 'cell'
else:
possible_labels = clusters['cluster_label']
label_name = 'cluster'
for k in possible_labels:
keep = k == spike_labels
out_data[label_name + '#'+ str(k)] = (spike_indexes[keep], spike_labels[keep])
else:
out_data['cell#all'] = (spike_indexes, spike_labels)
name = 'spikes - segNum {} - chanGrp {}'.format(seg_num, chan_grp)
filename = os.path.join(export_path, name)
self.write_out_data(out_data, filename)
class CsvSpikeExporter(GenericSpikeExporter):
ext = 'csv'
def write_out_data(self, out_data, filename):
for key, (spike_indexes, spike_labels) in out_data.items():
filename2 = filename +' - '+key+'.csv'
self._write_one_file(filename2, spike_indexes, spike_labels)
def _write_one_file(self, filename, spike_indexes, spike_labels):
# called with (filename, spike_indexes, spike_labels); each row is "index,label"
rows = [''] * len(spike_indexes)
for i in range(len(spike_indexes)):
rows[i] = '{},{}\n'.format(spike_indexes[i], spike_labels[i])
with open(filename, 'w') as out:
out.writelines(rows)
export_csv = CsvSpikeExporter()
class MatlabSpikeExporter(GenericSpikeExporter):
ext = 'mat'
def write_out_data(self, out_data, filename):
mdict = {}
for key, (spike_indexes, spike_labels) in out_data.items():
mdict['index_'+key] = spike_indexes
mdict['label_'+key] =spike_labels
scipy.io.savemat(filename+'.mat', mdict)
export_matlab = MatlabSpikeExporter()
class ExcelSpikeExporter(GenericSpikeExporter):
ext = 'xlsx'
def write_out_data(self, out_data, filename):
assert HAS_PANDAS
writer = pd.ExcelWriter(filename+'.xlsx')
for key, (spike_indexes, spike_labels) in out_data.items():
df = pd.DataFrame()
df['index'] = spike_indexes
df['label'] = spike_labels
df.to_excel(writer, sheet_name=key, index=False)
writer.save()
export_excel = ExcelSpikeExporter()
# list
export_list = [export_csv, export_matlab, ]
if HAS_PANDAS:
export_list.append(export_excel)
export_dict = {e.ext:e for e in export_list}
def export_catalogue_spikes(cc, export_path=None, formats=None):
"""
This export spikes from catalogue.
Usefull when when catalogue peak sampler mode is all.
This avoid the peeler.
"""
dataio = cc.dataio
chan_grp = cc.chan_grp
sampler_mode = cc.info['peak_sampler']['mode']
if sampler_mode != 'all':
print('You are trying to export peaks from the catalogue but peak_sampler mode is not "all"')
if export_path is None:
export_path = os.path.join(dataio.dirname, 'export_catalogue_chan_grp_{}'.format(chan_grp))
catalogue = {}
catalogue['clusters'] = cc.clusters.copy()
if formats is None:
exporters = export_list
elif isinstance(formats, str):
assert formats in export_dict
exporters = [ export_dict[formats] ]
elif isinstance(formats, list):
exporters = [ export_dict[format] for format in formats]
else:
raise ValueError()
for seg_num in range(dataio.nb_segment):
in_segment = (cc.all_peaks['segment'] == seg_num)
pos_label = (cc.all_peaks['cluster_label'] >= 0)
spikes = cc.all_peaks[in_segment & pos_label]
if spikes is None: continue
args = (spikes, catalogue, seg_num, chan_grp, export_path,)
kargs = dict(split_by_cluster=False, use_cell_label=False)
for exporter in exporters:
exporter(*args, **kargs)
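# Usage sketch (not part of the original module): `cc` is assumed to be a
# tridesclous catalogue-constructor object (with .dataio, .chan_grp, .all_peaks, ...)
# whose peak_sampler mode is 'all'; the export path is hypothetical.
# export_dict maps file extensions ('csv', 'mat', and 'xlsx' when pandas is
# available) to the corresponding exporter instances.
#
#     export_catalogue_spikes(cc, export_path='/tmp/tdc_export', formats=['csv', 'mat'])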
| mit |
lefthandedroo/Cosmo-models | zprev versions/Models_py_backup/stats copy.py | 1 | 12889 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 16:02:10 2018
@author: BallBlueMeercat
"""
import matplotlib.pyplot as plt
from emcee import EnsembleSampler
import numpy as np
import time
import os.path
import datasim
import tools
import ln
import plots
def stats(test_params, data_dict, sigma, nsteps,
save_path, firstderivs_key):
"""
Takes in:
test_params = dictionary of parameters to be emcee fitted
'm':int/float = e_m(t)/ec(t0) at t=t0;
'gamma':int/float = interaction term;
'zeta':int/float = interaction term;
'alpha':int/float = SN peak mag correlation parameter;
'beta' :int/float = SN peak mag correlation parameter;
data_dict = dictionary of parameters from data
'colour': numpy.ndarray = SN colour;
'x1': numpy.ndarray = SN stretch correction;
'zpicks':list of redshifts sorted in ascending order;
'mag':list of apparent magnitudes;
sigma = standard deviation of error on the data;
nsteps = int, steps to be taken by each emcee walker;
save_path = string, directory for saving output;
firstderivs_key = string, name of IVCDM model to use for model mag.
Returns:
propert = dictionary of best fit, mean, standard deviation and trace
for each fitted parameter;
sampler = emcee EnsembleSampler used for the fit.
"""
# print('-stats has been called')
zpicks = data_dict.get('zpicks',0)
mag = data_dict.get('mag',0)
if firstderivs_key == 'exotic':
pass
elif firstderivs_key == 'LCDM':
test_params['gamma'] = 0
del test_params['gamma']
test_params['zeta'] = 0
del test_params['zeta']
else:
test_params['zeta'] = 0
del test_params['zeta']
# emcee parameters:
ndim = len(test_params)
nwalkers = int(ndim * 2)
# Initializing walkers.
poslist = list(test_params.values())
pos = []
for i in poslist:
pos.append(i)
startpos = np.array(pos)
pos = [startpos + 0.001*np.random.randn(ndim) for i in range(nwalkers)]
# Are walkers starting outside of prior?
i = 0
while i < nwalkers:
theta = pos[i]
lp = ln.lnprior(theta, firstderivs_key)
if not np.isfinite(lp):
print('~~~~~~~pos[%s] (outside of prior) = %s ~~~~~~~'%(i, theta))
i += 1
# Sampler setup.
times0 = time.time() # starting sampler timer
sampler = EnsembleSampler(nwalkers, ndim, ln.lnprob,
args=(data_dict, sigma, firstderivs_key, ndim))
# Burnin.
burnin = int(nsteps/4) # steps to discard
print('_____ burnin start')
timeb0 = time.time() # starting burnin timer
pos, prob, state = sampler.run_mcmc(pos, burnin)
timeb1=time.time() # stopping burnin timer
print('_____ burnin end')
sampler.reset()
# Starting sampler after burnin.
print('_____ sampler start')
sampler.run_mcmc(pos, nsteps)
print('_____ sampler end')
times1=time.time() # stopping sampler timer
# Walker steps.
lnprob = sampler.flatlnprobability
# Index of best parameters found by emcee.
bi = np.argmax(sampler.flatlnprobability) # index with highest post prob
trace = sampler.chain[:, burnin:, :].reshape(-1, ndim)
# Extracting results:
thetabest = np.zeros(ndim)
parambest = {}
true = []
propert = {}
propert['trace'] = trace
colours = ['coral', 'orchid', 'apple', 'orange', 'aquamarine', 'black']
def stat(i, sampler, string, test_params, propert):
best_output = sampler.flatchain[bi,i]
# Input m = e_m(z)/ec(z=0).
param_true = test_params.get(string, 0)
true.append(param_true)
# Output m.
output = sampler.flatchain[:,i]
# Standard deviation and mean of the m distribution.
propert[string+'_sd'] = np.std(output)
propert[string+'_mean'] = np.mean(output)
propert[string] = sampler.flatchain[bi,i]
return best_output, output, param_true, propert
for i in range(ndim):
if i == 0:
# mbest = sampler.flatchain[bi,i]
# thetabest[i] = mbest
# parambest['m'] = mbest
# # Input m = e_m(z)/ec(z=0).
# m_true = test_params.get('m', 0)
# true.append(m_true)
# # Output m.
# m = sampler.flatchain[:,i]
# # Standard deviation and mean of the m distribution.
# m_sd = np.std(m)
# m_mean = np.mean(m)
# propert['m_sd'] = m_sd
# propert['m_mean'] = m_mean
# propert['m'] = mbest
# plots.stat('coral', m, m_true, 'Matter', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'm', test_params, propert)
plots.stat(colours[i], output, param_true, 'matter', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['m'] = best
elif i == 1:
# Mbest = sampler.flatchain[bi,i]
# thetabest[i] = Mbest
# parambest['M'] = Mbest
# # Input M.
# M_true = test_params.get('M',0)
# true.append(M_true)
# # Output alpha.
# M = sampler.flatchain[:,i]
# # Standard deviation and mean of the alpha distribution
# M_sd = np.std(M)
# M_mean = np.mean(M)
# propert['M_sd'] = M_sd
# propert['M_mean'] = M_mean
# propert['M'] = Mbest
# plots.stat('orchid', M, M_true, 'Mcorr', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'M', test_params, propert)
plots.stat(colours[i], output, param_true, 'Mcorr', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['M'] = best
elif i == 2:
# alphabest = sampler.flatchain[bi,i]
# thetabest[i] = alphabest
# parambest['alpha'] = alphabest
# # Input interaction term.
# a_true = test_params.get('alpha',0)
# true.append(a_true)
# # Output gamma.
# alpha = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# alpha_sd = np.std(alpha)
# alpha_mean = np.mean(alpha)
# propert['alpha_sd'] = alpha_sd
# propert['alpha_mean'] = alpha_mean
# propert['alpha'] = alphabest
# plots.stat('apple', alpha, a_true, 'alpha', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'alpha', test_params, propert)
plots.stat(colours[i], output, param_true, 'alpha', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['alpha'] = best
elif i == 3:
# betabest = sampler.flatchain[bi,i]
# thetabest[i] = betabest
# parambest['beta'] = betabest
# # Input interaction term.
# b_true = test_params.get('beta',0)
# true.append(b_true)
# # Output gamma.
# beta = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# beta_sd = np.std(beta)
# beta_mean = np.mean(beta)
# propert['beta_sd'] = beta_sd
# propert['beta_mean'] = beta_mean
# propert['beta'] = betabest
# plots.stat('orange', beta, b_true, 'beta', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'beta', test_params, propert)
plots.stat(colours[i], output, param_true, 'beta', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['beta'] = best
elif i == 4:
# gammabest = sampler.flatchain[bi,i]
# thetabest[i] = gammabest
# parambest['gamma'] = gammabest
# # Input interaction term.
# g_true = test_params.get('gamma',0)
# true.append(g_true)
# # Output gamma.
# gamma = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# gamma_sd = np.std(gamma)
# gamma_mean = np.mean(gamma)
# propert['gamma_sd'] = gamma_sd
# propert['gamma_mean'] = gamma_mean
# propert['gamma'] = gammabest
# plots.stat('aquamarine', gamma, g_true, 'Gamma', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'gamma', test_params, propert)
plots.stat(colours[i], output, param_true, 'gamma', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['gamma'] = best
elif i == 5:
# zetabest = sampler.flatchain[bi,i]
# thetabest[i] = zetabest
# parambest['zeta'] = zetabest
# # Input interaction term.
# z_true = test_params.get('zeta',0)
# true.append(z_true)
# # Output zeta.
# zeta = sampler.flatchain[:,i]
# # Standard deviation and mean of the gamme distribution.
# zeta_sd = np.std(zeta)
# zeta_mean = np.mean(zeta)
# propert['zeta_sd'] = zeta_sd
# propert['zeta_mean'] = zeta_mean
# propert['zeta'] = zetabest
# plots.stat('black', zeta, z_true, 'Zeta', lnprob, zpicks,
# mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
best, output, param_true, propert = stat(i, sampler, 'zeta', test_params, propert)
plots.stat(colours[i], output, param_true, 'zeta', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['zeta'] = best
# Checking if best found parameters are within prior.
lp = ln.lnprior(thetabest, firstderivs_key)
if not np.isfinite(lp):
print('')
print('best emcee parameters outside of prior (magbest calculation)')
print('')
# Plot of data mag and redshifts, overlayed with
# mag simulated using emcee best parameters and data redshifts.
magbest = datasim.magn(parambest, data_dict, firstderivs_key)
plt.figure()
plt.title('model: '+firstderivs_key
+'\n Evolution of magnitude with redshift \n nsteps: '
+str(nsteps)+', noise: '+str(sigma)+', npoints: '+str(len(zpicks)))
data = plt.errorbar(zpicks, mag, yerr=sigma, fmt='.', alpha=0.3)
best_fit = plt.scatter(zpicks, magbest, lw='1', c='xkcd:tomato')
plt.ylabel('magnitude')
plt.xlabel('z')
plt.legend([data, best_fit], ['LCDM', firstderivs_key])
stamp = str(int(time.time()))
filename = str(stamp)+'____magz__nsteps_'+str(nsteps)+'_nwalkers_' \
+str(nwalkers)+'_noise_'+str(sigma)+'_numpoints_'+str(len(zpicks))+'.png'
filename = os.path.join(save_path, filename)
plt.savefig(filename)
plt.show(block=False)
# Corner plot (walkers' walk + histogram).
import corner
# samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples = sampler.chain[:, :, :].reshape((-1, ndim))
corner.corner(samples, labels=["$m$", "$M$", "$alpha$", "$beta$", "$g$", "$z$"],
truths=true)
# Results getting printed:
if bi == 0:
print('@@@@@@@@@@@@@@@@@')
print('best index =',str(bi))
print('@@@@@@@@@@@@@@@@@')
print('best parameters =',str(parambest))
print('m.a.f.:', np.mean(sampler.acceptance_fraction))
print('nsteps:', str(nsteps))
print('sigma:', str(sigma))
print('npoints:', str(len(zpicks)))
print('model:', firstderivs_key)
tools.timer('burnin', timeb0, timeb1)
tools.timer('sampler', times0, times1)
return propert, sampler | mit |
dungvtdev/upsbayescpm | bayespy/inference/vmp/nodes/CovarianceFunctions.py | 5 | 28645 | ################################################################################
# Copyright (C) 2011-2012 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import itertools
import numpy as np
#import scipy as sp
import scipy.sparse as sp # prefer CSC format
#import scipy.linalg.decomp_cholesky as decomp
#import scipy.linalg as linalg
#import scipy.special as special
#import matplotlib.pyplot as plt
#import time
#import profile
import scipy.spatial.distance as dist
#import scikits.sparse.distance as spdist
from . import node as ef
from bayespy.utils import misc as utils
# Covariance matrices can be either arrays or matrices so be careful
# with products and powers! Use explicit multiply or dot instead of
# *-operator.
def gp_cov_se(D2, overwrite=False):
if overwrite:
K = D2
K *= -0.5
np.exp(K, out=K)
else:
K = np.exp(-0.5*D2)
return K
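# Minimal sketch of how this is used together with the helpers defined below
# (toy inputs, illustration only): squared-exponential covariance between two
# 1-D input sets, values in (0, 1].
#     x1 = gp_standardize_input(np.linspace(0, 1, 5))
#     x2 = gp_standardize_input(np.linspace(0, 1, 3))
#     K = gp_cov_se(squared_distance(x1, x2))   # shape (5, 3)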
def gp_cov_pp2_new(r, d, derivative=False):
# Dimension dependent parameter
q = 2
j = np.floor(d/2) + q + 1
# Polynomial coefficients
a2 = j**2 + 4*j + 3
a1 = 3*j + 6
a0 = 3
# Two parts of the covariance function
k1 = (1-r) ** (j+2)
k2 = (a2*r**2 + a1*r + 3)
# TODO: Check that derivative is 0, 1 or 2!
if derivative == 0:
# Return covariance
return k1 * k2 / 3
dk1 = - (j+2) * (1-r)**(j+1)
dk2 = 2*a2*r + a1
if derivative == 1:
# Return first derivative of the covariance
return (k1 * dk2 + dk1 * k2) / 3
ddk1 = (j+2) * (j+1) * (1-r)**j
ddk2 = 2*a2
if derivative == 2:
# Return second derivative of the covariance
return (ddk1*k2 + 2*dk1*dk2 + k1*ddk2) / 3
def gp_cov_pp2(r, d, gradient=False):
# Dimension dependent parameter
j = np.floor(d/2) + 2 + 1
# Polynomial coefficients
a2 = j**2 + 4*j + 3
a1 = 3*j + 6
a0 = 3
# Two parts of the covariance function
k1 = (1-r) ** (j+2)
k2 = (a2*r**2 + a1*r + 3)
# The covariance function
k = k1 * k2 / 3
if gradient:
# The gradient w.r.t. r
dk = k * (j+2) / (r-1) + k1 * (2*a2*r + a1) / 3
return (k, dk)
else:
return k
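# Illustrative check (assumed 1-D inputs, d=1): the PP2 kernel has compact
# support, so it equals 1 at r=0 and 0 at r=1.
#     r = np.array([0.0, 0.25, 0.5, 1.0])
#     k = gp_cov_pp2(r, 1)   # k[0] == 1.0, k[-1] == 0.0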
def gp_cov_delta(N):
# TODO: Use sparse matrices here!
if N > 0:
#print('in gpcovdelta', N, sp.identity(N).shape)
return sp.identity(N)
else:
# Sparse matrices do not allow zero-length dimensions
return np.identity(N)
#return np.identity(N)
#return np.asmatrix(np.identity(N))
def squared_distance(x1, x2):
## # Reshape arrays to 2-D arrays
## sh1 = np.shape(x1)[:-1]
## sh2 = np.shape(x2)[:-1]
## d = np.shape(x1)[-1]
## x1 = np.reshape(x1, (-1,d))
## x2 = np.reshape(x2, (-1,d))
(m1,n1) = x1.shape
(m2,n2) = x2.shape
if m1 == 0 or m2 == 0:
D2 = np.empty((m1,m2))
else:
# Compute squared Euclidean distance
D2 = dist.cdist(x1, x2, metric='sqeuclidean')
#D2 = np.asmatrix(D2)
# Reshape the result
#D2 = np.reshape(D2, sh1 + sh2)
return D2
# General rule for the parameters for covariance functions:
#
# (value, [ [dvalue1, ...], [dvalue2, ...], [dvalue3, ...], ...])
#
# For instance,
#
# k = covfunc_se((1.0, []), (15, [ [1,update_grad] ]))
# K = k((x1, [ [dx1,update_grad] ]), (x2, []))
#
# Plain values are converted as:
# value -> (value, [])
def gp_standardize_input(x):
if np.size(x) == 0:
x = np.reshape(x, (0,0))
elif np.ndim(x) == 0:
x = np.reshape(x, (1,1))
elif np.ndim(x) == 1:
x = np.reshape(x, (-1,1))
elif np.ndim(x) == 2:
x = np.atleast_2d(x)
else:
raise Exception("Standard GP inputs must be 2-dimensional")
return x
def gp_preprocess_inputs(x1,x2=None):
#args = list(args)
#if len(args) < 1 or len(args) > 2:
#raise Exception("Number of inputs must be one or two")
if x2 is None:
x1 = gp_standardize_input(x1)
return x1
else:
if x1 is x2:
x1 = gp_standardize_input(x1)
x2 = x1
else:
x1 = gp_standardize_input(x1)
x2 = gp_standardize_input(x2)
return (x1, x2)
#return args
## def gp_preprocess_inputs(x1,x2=None):
## #args = list(args)
## #if len(args) < 1 or len(args) > 2:
## #raise Exception("Number of inputs must be one or two")
## if x2 is not None: len(args) == 2:
## if args[0] is args[1]:
## args[0] = gp_standardize_input(args[0])
## args[1] = args[0]
## else:
## args[1] = gp_standardize_input(args[1])
## args[0] = gp_standardize_input(args[0])
## else:
## args[0] = gp_standardize_input(args[0])
## return args
# TODO:
# General syntax for these covariance functions:
# covfunc(hyper1,
# hyper2,
# ...
# hyperN,
# x1,
# x2=None,
# gradient=list_of_booleans_for_each_hyperparameter)
def covfunc_zeros(x1, x2=None, gradient=False):
# Compute distance and covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
# Only variance vector asked
N = np.shape(x1)[0]
# TODO: Use sparse matrices!
K = np.zeros(N)
#K = np.asmatrix(np.zeros((N,1)))
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
# Full covariance matrix asked
#x1 = inputs[0]
#x2 = inputs[1]
# Number of inputs x1
N1 = np.shape(x1)[0]
N2 = np.shape(x2)[0]
# TODO: Use sparse matrices!
K = np.zeros((N1,N2))
#K = np.asmatrix(np.zeros((N1,N2)))
if gradient is not False:
return (K, [])
else:
return K
def covfunc_delta(amplitude, x1, x2=None, gradient=False):
# Make sure that amplitude is a scalar, not an array object
amplitude = utils.array_to_scalar(amplitude)
## if gradient:
## gradient_amplitude = gradient[0]
## else:
## gradient_amplitude = []
## inputs = gp_preprocess_inputs(*inputs)
# Compute distance and covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
# Only variance vector asked
#x = inputs[0]
N = np.shape(x1)[0]
K = np.ones(N) * amplitude**2
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
# Full covariance matrix asked
#x1 = inputs[0]
#x2 = inputs[1]
# Number of inputs x1
N1 = np.shape(x1)[0]
# x1 == x2?
if x1 is x2:
delta = True
# Delta covariance
#
# FIXME: Broadcasting doesn't work with sparse matrices,
# so must use scalar multiplication
K = gp_cov_delta(N1) * amplitude**2
#K = gp_cov_delta(N1).multiply(amplitude**2)
else:
delta = False
# Number of inputs x2
N2 = np.shape(x2)[0]
# Zero covariance
if N1 > 0 and N2 > 0:
K = sp.csc_matrix((N1,N2))
else:
K = np.zeros((N1,N2))
# Gradient w.r.t. amplitude
if gradient:
# FIXME: Broadcasting doesn't work with sparse matrices,
# so must use scalar multiplication
gradient_amplitude = K*(2/amplitude)
print("noise grad", gradient_amplitude)
return (K, (gradient_amplitude,))
else:
return K
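# Sketch (toy inputs; plain floats assumed acceptable as hyperparameters):
# white-noise covariance with amplitude 0.5.
#     x = np.arange(4)
#     K = covfunc_delta(0.5, x, x)   # sparse 4x4, 0.25 on the diagonal
#     v = covfunc_delta(0.5, x)      # variance vector, all 0.25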
def covfunc_pp2(amplitude, lengthscale, x1, x2=None, gradient=False):
# Make sure that hyperparameters are scalars, not an array objects
amplitude = utils.array_to_scalar(amplitude)
lengthscale = utils.array_to_scalar(lengthscale)
#amplitude = theta[0]
#lengthscale = theta[1]
## if gradient:
## gradient_amplitude = gradient[0]
## gradient_lengthscale = gradient[1]
## else:
## gradient_amplitude = []
## gradient_lengthscale = []
## inputs = gp_preprocess_inputs(*inputs)
# Compute covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
# Compute variance vector
K = np.ones(np.shape(x1)[:-1])
K *= amplitude**2
# Compute gradient w.r.t. lengthscale
if gradient:
gradient_lengthscale = np.zeros(np.shape(x1)[:-1])
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
# Compute (sparse) distance matrix
if x1 is x2:
x1 = x1 / lengthscale
x2 = x1
# note: spdist (scikits.sparse.distance) is only imported commented-out above
D2 = spdist.pdist(x1, 1.0, form="full", format="csc")
else:
x1 = x1 / lengthscale
x2 = x2 / lengthscale
D2 = spdist.cdist(x1, x2, 1.0, format="csc")
r = np.sqrt(D2.data)
N1 = np.shape(x1)[0]
N2 = np.shape(x2)[0]
# Compute the covariances
if gradient:
(k, dk) = gp_cov_pp2(r, np.shape(x1)[-1], gradient=True)
else:
k = gp_cov_pp2(r, np.shape(x1)[-1])
k *= amplitude**2
# Compute gradient w.r.t. lengthscale
if gradient:
if N1 >= 1 and N2 >= 1:
dk *= r * (-amplitude**2 / lengthscale)
gradient_lengthscale = sp.csc_matrix((dk, D2.indices, D2.indptr),
shape=(N1,N2))
else:
gradient_lengthscale = np.empty((N1,N2))
# Form sparse covariance matrix
if N1 >= 1 and N2 >= 1:
## K = sp.csc_matrix((k, ij), shape=(N1,N2))
K = sp.csc_matrix((k, D2.indices, D2.indptr), shape=(N1,N2))
else:
K = np.empty((N1, N2))
#print(K.__class__)
# Gradient w.r.t. amplitude
if gradient:
gradient_amplitude = K * (2 / amplitude)
# Return values
if gradient:
print("pp2 grad", gradient_lengthscale)
return (K, (gradient_amplitude, gradient_lengthscale))
else:
return K
def covfunc_se(amplitude, lengthscale, x1, x2=None, gradient=False):
# Make sure that hyperparameters are scalars, not an array objects
amplitude = utils.array_to_scalar(amplitude)
lengthscale = utils.array_to_scalar(lengthscale)
# Compute covariance matrix
if x2 is None:
x1 = gp_preprocess_inputs(x1)
#x = inputs[0]
# Compute variance vector
N = np.shape(x1)[0]
K = np.ones(N)
np.multiply(K, amplitude**2, out=K)
# Compute gradient w.r.t. lengthscale
if gradient:
# TODO: Use sparse matrices?
gradient_lengthscale = np.zeros(N)
else:
(x1,x2) = gp_preprocess_inputs(x1,x2)
x1 = x1 / (lengthscale)
x2 = x2 / (lengthscale)
# Compute distance matrix
K = squared_distance(x1, x2)
# Compute gradient partly
if gradient:
gradient_lengthscale = np.divide(K, lengthscale)
# Compute covariance matrix
gp_cov_se(K, overwrite=True)
np.multiply(K, amplitude**2, out=K)
# Compute gradient w.r.t. lengthscale
if gradient:
gradient_lengthscale *= K
# Gradient w.r.t. amplitude
if gradient:
gradient_amplitude = K * (2 / amplitude)
# Return values
if gradient:
print("se grad", gradient_amplitude, gradient_lengthscale)
return (K, (gradient_amplitude, gradient_lengthscale))
else:
return K
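# Sketch (toy inputs; plain floats assumed acceptable as hyperparameters):
# full squared-exponential covariance with amplitude 2.0 and lengthscale 0.5.
#     x = np.linspace(0, 1, 4)
#     K = covfunc_se(2.0, 0.5, x, x)   # 4x4 dense, diagonal == amplitude**2 == 4.0
#     v = covfunc_se(2.0, 0.5, x)      # variance vector, all 4.0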
class CovarianceFunctionWrapper():
def __init__(self, covfunc, *params):
# Parse parameter values and their gradients to separate lists
self.covfunc = covfunc
self.params = list(params)
self.gradient_params = list()
## print(params)
for ind in range(len(params)):
if isinstance(params[ind], tuple):
# Parse the value and the list of gradients from the
# form:
# ([value, ...], [ [grad1, ...], [grad2, ...], ... ])
self.gradient_params.append(params[ind][1])
self.params[ind] = params[ind][0][0]
else:
# No gradients, parse from the form:
# [value, ...]
self.gradient_params.append([])
self.params[ind] = params[ind][0]
def fixed_covariance_function(self, *inputs, gradient=False):
# What if this is called several times??
if gradient:
## grads = [[grad[0] for grad in self.gradient_params[ind]]
## for ind in range(len(self.gradient_params))]
## (K, dK) = self.covfunc(self.params,
## *inputs,
## gradient=self.gradient_params)
arguments = tuple(self.params) + tuple(inputs)
(K, dK) = self.covfunc(*arguments,
gradient=True)
## (K, dK) = self.covfunc(self.params,
## *inputs,
## gradient=grads)
DK = []
for ind in range(len(dK)):
# Gradient w.r.t. covariance function's ind-th
# hyperparameter
dk = dK[ind]
# Chain rule: Multiply by the gradient of the
# hyperparameter w.r.t. parent node and append the
# list DK:
# DK = [ (dx1_1, callback), ..., (dx1_n, callback) ]
for grad in self.gradient_params[ind]:
#print(grad[0])
#print(grad[1:])
#print(dk)
if sp.issparse(dk):
print(dk.shape)
print(grad[0].shape)
DK += [ [dk.multiply(grad[0])] + grad[1:] ]
else:
DK += [ [np.multiply(dk,grad[0])] + grad[1:] ]
#DK += [ [np.multiply(grad[0], dk)] + grad[1:] ]
## DK += [ (np.multiply(grad, dk),) + grad[1:]
## for grad in self.gradient_params[ind] ]
## for grad in self.gradient_params[ind]:
## DK += ( (np.multiply(grad, dk),) + grad[1:] )
## DK = []
## for ind in range(len(dK)):
## for (grad, dk) in zip(self.gradient_params[ind], dK[ind]):
## DK += [ [dk] + grad[1:] ]
K = [K]
return (K, DK)
else:
arguments = tuple(self.params) + tuple(inputs)
#print(arguments)
K = self.covfunc(*arguments,
gradient=False)
return [K]
class CovarianceFunction(ef.Node):
def __init__(self, covfunc, *args, **kwargs):
self.covfunc = covfunc
params = list(args)
for i in range(len(args)):
# Check constant parameters
if utils.is_numeric(args[i]):
params[i] = ef.NodeConstant([np.asanyarray(args[i])],
dims=[np.shape(args[i])])
# TODO: Parameters could be constant functions? :)
ef.Node.__init__(self, *params, dims=[(np.inf, np.inf)], **kwargs)
def __call__(self, x1, x2):
""" Compute covariance matrix for inputs x1 and x2. """
covfunc = self.message_to_child()
return covfunc(x1, x2)[0]
def message_to_child(self, gradient=False):
params = [parent.message_to_child(gradient=gradient) for parent in self.parents]
covfunc = self.get_fixed_covariance_function(*params)
return covfunc
def get_fixed_covariance_function(self, *params):
get_cov_func = CovarianceFunctionWrapper(self.covfunc, *params)
return get_cov_func.fixed_covariance_function
## def covariance_function(self, *params):
## # Parse parameter values and their gradients to separate lists
## params = list(params)
## gradient_params = list()
## print(params)
## for ind in range(len(params)):
## if isinstance(params[ind], tuple):
## # Parse the value and the list of gradients from the
## # form:
## # ([value, ...], [ [grad1, ...], [grad2, ...], ... ])
## gradient_params.append(params[ind][1])
## params[ind] = params[ind][0][0]
## else:
## # No gradients, parse from the form:
## # [value, ...]
## gradient_params.append([])
## params[ind] = params[ind][0]
## # This gradient_params changes mysteriously..
## print('grad_params before')
## if isinstance(self, SquaredExponential):
## print(gradient_params)
## def cov(*inputs, gradient=False):
## if gradient:
## print('grad_params after')
## print(gradient_params)
## grads = [[grad[0] for grad in gradient_params[ind]]
## for ind in range(len(gradient_params))]
## print('CovarianceFunction.cov')
## #if isinstance(self, SquaredExponential):
## #print(self.__class__)
## #print(grads)
## (K, dK) = self.covfunc(params,
## *inputs,
## gradient=grads)
## for ind in range(len(dK)):
## for (grad, dk) in zip(gradient_params[ind], dK[ind]):
## grad[0] = dk
## K = [K]
## dK = []
## for grad in gradient_params:
## dK += grad
## return (K, dK)
## else:
## K = self.covfunc(params,
## *inputs,
## gradient=False)
## return [K]
## return cov
class Sum(CovarianceFunction):
def __init__(self, *args, **kwargs):
CovarianceFunction.__init__(self,
None,
*args,
**kwargs)
def get_fixed_covariance_function(self, *covfunc_parents):
def covfunc(*inputs, gradient=False):
K_sum = None
if gradient:
dK_sum = list()
for k in covfunc_parents:
if gradient:
(K, dK) = k(*inputs, gradient=gradient)
print("dK in sum", dK)
dK_sum += dK
#print("dK_sum in sum", dK_sum)
else:
K = k(*inputs, gradient=gradient)
if K_sum is None:
K_sum = K[0]
else:
try:
K_sum += K[0]
except:
# You have to do this way, for instance, if
# K_sum is sparse and K[0] is dense.
K_sum = K_sum + K[0]
if gradient:
#print("dK_sum on: ", dK_sum)
#print('covsum', dK_sum)
return ([K_sum], dK_sum)
else:
return [K_sum]
return covfunc
class Delta(CovarianceFunction):
def __init__(self, amplitude, **kwargs):
CovarianceFunction.__init__(self,
covfunc_delta,
amplitude,
**kwargs)
class Zeros(CovarianceFunction):
def __init__(self, **kwargs):
CovarianceFunction.__init__(self,
covfunc_zeros,
**kwargs)
class SquaredExponential(CovarianceFunction):
def __init__(self, amplitude, lengthscale, **kwargs):
CovarianceFunction.__init__(self,
covfunc_se,
amplitude,
lengthscale,
**kwargs)
class PiecewisePolynomial2(CovarianceFunction):
def __init__(self, amplitude, lengthscale, **kwargs):
CovarianceFunction.__init__(self,
covfunc_pp2,
amplitude,
lengthscale,
**kwargs)
# TODO: Rename to Blocks or Joint ?
class Multiple(CovarianceFunction):
def __init__(self, covfuncs, **kwargs):
self.d = len(covfuncs)
#self.sparse = sparse
parents = [covfunc for row in covfuncs for covfunc in row]
CovarianceFunction.__init__(self,
None,
*parents,
**kwargs)
def get_fixed_covariance_function(self, *covfuncs):
def cov(*inputs, gradient=False):
# Computes the covariance matrix from blocks which all
# have their corresponding covariance functions
if len(inputs) < 2:
# For one input, return the variance vector instead of
# the covariance matrix
x1 = inputs[0]
# Collect variance vectors from the covariance
# functions corresponding to the diagonal blocks
K = [covfuncs[i*self.d+i](x1[i], gradient=gradient)[0]
for i in range(self.d)]
# Form the variance vector from the collected vectors
if gradient:
raise Exception('Gradient not yet implemented.')
else:
## print("in cov multiple")
## for (k,kf) in zip(K,covfuncs):
## print(np.shape(k), k.__class__, kf)
#K = np.vstack(K)
K = np.concatenate(K)
else:
x1 = inputs[0]
x2 = inputs[1]
# Collect the covariance matrix (and possibly
# gradients) from each block.
#print('cov mat collection begins')
K = [[covfuncs[i*self.d+j](x1[i], x2[j], gradient=gradient)
for j in range(self.d)]
for i in range(self.d)]
#print('cov mat collection ends')
# Remove matrices that have zero length dimensions?
if gradient:
K = [[K[i][j]
for j in range(self.d)
if np.shape(K[i][j][0][0])[1] != 0]
for i in range(self.d)
if np.shape(K[i][0][0][0])[0] != 0]
else:
K = [[K[i][j]
for j in range(self.d)
if np.shape(K[i][j][0])[1] != 0]
for i in range(self.d)
if np.shape(K[i][0][0])[0] != 0]
n_blocks = len(K)
#print("nblocks", n_blocks)
#print("K", K)
# Check whether all blocks are sparse
is_sparse = True
for i in range(n_blocks):
for j in range(n_blocks):
if gradient:
A = K[i][j][0][0]
else:
A = K[i][j][0]
if not sp.issparse(A):
is_sparse = False
if gradient:
## Compute the covariance matrix and the gradients
# Create block matrices of zeros. This helps in
# computing the gradient.
if is_sparse:
# Empty sparse matrices. Some weird stuff here
# because sparse matrices can't have zero
# length dimensions.
Z = [[sp.csc_matrix(np.shape(K[i][j][0][0]))
for j in range(n_blocks)]
for i in range(n_blocks)]
else:
# Empty dense matrices
Z = [[np.zeros(np.shape(K[i][j][0][0]))
for j in range(n_blocks)]
for i in range(n_blocks)]
## for j in range(self.d)]
## for i in range(self.d)]
# Compute gradients block by block
dK = list()
for i in range(n_blocks):
for j in range(n_blocks):
# Store the zero block
z_old = Z[i][j]
# Go through the gradients for the (i,j)
# block
for dk in K[i][j][1]:
# Keep other blocks at zero and set
# the gradient to (i,j) block. Form
# the matrix from blocks
if is_sparse:
Z[i][j] = dk[0]
dk[0] = sp.bmat(Z).tocsc()
else:
if sp.issparse(dk[0]):
Z[i][j] = dk[0].toarray()
else:
Z[i][j] = dk[0]
#print("Z on:", Z)
dk[0] = np.asarray(np.bmat(Z))
# Append the computed gradient matrix
# to the list of gradients
dK.append(dk)
# Restore the zero block
Z[i][j] = z_old
## Compute the covariance matrix but not the
## gradients
if is_sparse:
# Form the full sparse covariance matrix from
# blocks. Ignore blocks having a zero-length
# axis because sparse matrices consider zero
# length as an invalid shape (BUG IN SCIPY?).
K = [[K[i][j][0][0]
for j in range(n_blocks)]
for i in range(n_blocks)]
K = sp.bmat(K).tocsc()
else:
# Form the full dense covariance matrix from
# blocks. Transform sparse blocks to dense
# blocks.
K = [[K[i][j][0][0]
if not sp.issparse(K[i][j][0][0]) else
K[i][j][0][0].toarray()
for j in range(n_blocks)]
for i in range(n_blocks)]
K = np.asarray(np.bmat(K))
else:
## Compute the covariance matrix but not the
## gradients
if is_sparse:
# Form the full sparse covariance matrix from
# blocks. Ignore blocks having a zero-length
# axis because sparse matrices consider zero
# length as an invalid shape (BUG IN SCIPY?).
K = [[K[i][j][0]
for j in range(n_blocks)]
for i in range(n_blocks)]
K = sp.bmat(K).tocsc()
else:
# Form the full dense covariance matrix from
# blocks. Transform sparse blocks to dense
# blocks.
K = [[K[i][j][0]
if not sp.issparse(K[i][j][0]) else
K[i][j][0].toarray()
for j in range(n_blocks)]
for i in range(n_blocks)]
K = np.asarray(np.bmat(K))
if gradient:
return ([K], dK)
else:
return [K]
return cov
| mit |
micahhausler/pandashells | pandashells/test/p_crypt_test.py | 9 | 2552 | #! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
from pandashells.bin.p_crypt import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out'.split())
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encrypt(self, isfile_mock, system_mock):
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
'cat my_in | openssl enc -aes-256-cbc -salt > my_out')
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out -v'.split())
@patch('pandashells.bin.p_crypt.sys.stdout')
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encrypt_verbose(
self, isfile_mock, system_mock, stdout_mock):
stdout_mock.write = MagicMock()
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
'cat my_in | openssl enc -aes-256-cbc -salt > my_out')
self.assertTrue(stdout_mock.write.called)
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out --password xx'.split())
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encrypt_with_password(self, isfile_mock, system_mock):
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
"cat my_in | openssl enc -aes-256-cbc -salt -k 'xx' > my_out")
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out --password xx'.split())
@patch('pandashells.bin.p_crypt.sys.stderr')
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_encrypt_no_input_file(
self, isfile_mock, system_mock, stderr_mock):
isfile_mock.return_value = False
with self.assertRaises(SystemExit):
main()
@patch(
'pandashells.bin.p_crypt.sys.argv',
'p.crypt -i my_in -o my_out -d'.split())
@patch('pandashells.bin.p_crypt.os.system')
@patch('pandashells.bin.p_crypt.os.path.isfile')
def test_proper_decrypt(self, isfile_mock, system_mock):
isfile_mock.return_value = True
main()
system_mock.assert_called_with(
'cat my_in | openssl enc -d -aes-256-cbc > my_out')
| bsd-2-clause |
robcarver17/pysystemtrade | sysquant/optimisation/full_handcrafting.py | 1 | 47297 | # This is the *full* handcrafting code
# It can be used for long only
# It is *not* the code actually used in pysystemtrade
# It is completely self contained with no pysystemtrade imports
# CAVEATS:
# Uses weekly returns (resample needed first)
# Doesn't deal with missing assets
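# For example (sketch, assuming `daily_returns` is a pandas DataFrame of daily
# percentage returns; summing to weekly returns is an approximation):
#     weekly_returns = daily_returns.resample('W').sum()
#     p = Portfolio(weekly_returns)
#     p.cash_weights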
from copy import copy
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy.stats import norm
import scipy.cluster.hierarchy as sch
FLAG_BAD_RETURN = -9999999.9
from scipy.optimize import minimize
from collections import namedtuple
CALENDAR_DAYS_IN_YEAR = 365.25
WEEKS_IN_YEAR = CALENDAR_DAYS_IN_YEAR / 7.0
MAX_CLUSTER_SIZE = 3 # Do not change
WARN_ON_SUBPORTFOLIO_SIZE = (
0.2 # change if you like, sensible values are between 0 and 0.5
)
APPROX_MIN_WEIGHT_IN_CORR_WEIGHTS = 0.1
FUDGE_FACTOR_FOR_CORR_WEIGHT_UNCERTAINTY = 4.0
MAX_ROWS_FOR_CORR_ESTIMATION = 100
PSTEP_FOR_CORR_ESTIMATION = 0.25
# Convenience objects
NO_SUB_PORTFOLIOS = object()
NO_RISK_TARGET = object()
NO_TOP_LEVEL_WEIGHTS = object()
class diagobject(object):
def __init__(self):
pass
def __repr__(self):
return "%s \n %s " % (self.calcs, self.description)
def norm_weights(list_of_weights):
norm_weights = list(np.array(list_of_weights) / np.sum(list_of_weights))
return norm_weights
# To make comparison easier we compare sorted correlations to sorted correlations; otherwise we'd need many more than 10
# candidate matrices to cope with different ordering of the same matrix
def get_weights_using_uncertainty_method(cmatrix, data_points=100):
if len(cmatrix) == 1:
return [1.0]
if len(cmatrix) == 2:
return [0.5, 0.5]
if len(cmatrix) > MAX_CLUSTER_SIZE:
raise Exception("Cluster too big")
average_weights = optimised_weights_given_correlation_uncertainty(cmatrix, data_points)
weights = apply_min_weight(average_weights)
return weights
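# Illustrative sketch (toy 3x3 correlation matrix, ~100 weekly observations):
#     cmatrix = np.array([[1.0, 0.9, 0.0],
#                         [0.9, 1.0, 0.0],
#                         [0.0, 0.0, 1.0]])
#     w = get_weights_using_uncertainty_method(cmatrix, data_points=100)
#     # the uncorrelated third asset ends up with a larger weight than either
#     # member of the highly correlated pair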
def optimised_weights_given_correlation_uncertainty(corr_matrix, data_points, p_step=PSTEP_FOR_CORR_ESTIMATION):
dist_points = np.arange(p_step, stop=(1-p_step)+0.000001, step=p_step)
list_of_weights = []
for conf1 in dist_points:
for conf2 in dist_points:
for conf3 in dist_points:
conf_intervals = labelledCorrelations(conf1, conf2, conf3)
weights = optimise_for_corr_matrix_with_uncertainty(corr_matrix, conf_intervals, data_points)
list_of_weights.append(weights)
array_of_weights = np.array(list_of_weights)
average_weights = np.nanmean(array_of_weights, axis=0)
return average_weights
labelledCorrelations = namedtuple("labelledCorrelations", 'ab ac bc')
def optimise_for_corr_matrix_with_uncertainty(corr_matrix, conf_intervals, data_points):
labelled_correlations = extract_asset_pairwise_correlations_from_matrix(corr_matrix)
labelled_correlation_points = calculate_correlation_points_from_tuples(labelled_correlations, conf_intervals, data_points)
corr_matrix_at_distribution_point = three_asset_corr_matrix(labelled_correlation_points)
weights = optimise_for_corr_matrix(corr_matrix_at_distribution_point)
return weights
def extract_asset_pairwise_correlations_from_matrix(corr_matrix):
ab = corr_matrix[0][1]
ac = corr_matrix[0][2]
bc = corr_matrix[1][2]
return labelledCorrelations(ab=ab, ac=ac, bc=bc)
def calculate_correlation_points_from_tuples(labelled_correlations, conf_intervals, data_points):
correlation_point_list = [get_correlation_distribution_point(corr_value, data_points, confidence_interval)
for corr_value, confidence_interval in
zip(labelled_correlations, conf_intervals)]
labelled_correlation_points = labelledCorrelations(*correlation_point_list)
return labelled_correlation_points
def get_correlation_distribution_point(corr_value, data_points, conf_interval):
fisher_corr = fisher_transform(corr_value)
point_in_fisher_units = \
get_fisher_confidence_point(fisher_corr, data_points, conf_interval)
point_in_natural_units = inverse_fisher(point_in_fisher_units)
return point_in_natural_units
def fisher_transform(corr_value):
if corr_value>=1.0:
corr_value = 0.99999999999999
elif corr_value<=-1.0:
corr_value = -0.99999999999999
return 0.5*np.log((1+corr_value) / (1-corr_value)) # also arctanh
def get_fisher_confidence_point(fisher_corr, data_points, conf_interval):
if conf_interval<0.5:
confidence_in_fisher_units = fisher_confidence(data_points, conf_interval)
point_in_fisher_units = fisher_corr - confidence_in_fisher_units
elif conf_interval>0.5:
confidence_in_fisher_units = fisher_confidence(data_points, 1-conf_interval)
point_in_fisher_units = fisher_corr + confidence_in_fisher_units
else:
point_in_fisher_units = fisher_corr
return point_in_fisher_units
def fisher_confidence(data_points, conf_interval):
data_point_root =fisher_stdev(data_points)*FUDGE_FACTOR_FOR_CORR_WEIGHT_UNCERTAINTY
conf_point = get_confidence_point(conf_interval)
return data_point_root * conf_point
def fisher_stdev(data_points):
data_point_root = 1/((data_points-3)**.5)
return data_point_root
def get_confidence_point(conf_interval):
conf_point = norm.ppf(1-(conf_interval/2))
return conf_point
def inverse_fisher(fisher_corr_value):
return (np.exp(2*fisher_corr_value) - 1) / (np.exp(2*fisher_corr_value) + 1)
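# Quick sanity check (illustrative): the Fisher transform is arctanh, so the
# round trip recovers the original correlation.
#     z = fisher_transform(0.6)   # ~0.693
#     inverse_fisher(z)           # ~0.6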
def three_asset_corr_matrix(labelled_correlations):
"""
:return: np.array, 2 dimensions, 3x3
"""
ab = labelled_correlations.ab
ac = labelled_correlations.ac
bc = labelled_correlations.bc
m = [[1.0, ab, ac], [ab, 1.0, bc], [ac, bc, 1.0]]
m = np.array(m)
return m
def optimise_for_corr_matrix(corr_matrix):
## arbitrary
mean_list = [.05]*3
std = .1
stdev_list = np.full(len(mean_list), std)
sigma = sigma_from_corr_and_std(stdev_list, corr_matrix)
return optimise(sigma, mean_list)
def apply_min_weight(average_weights):
weights_with_min = [min_weight(weight) for weight in average_weights]
adj_weights = norm_weights(weights_with_min)
return adj_weights
def min_weight(weight):
if weight<APPROX_MIN_WEIGHT_IN_CORR_WEIGHTS:
return APPROX_MIN_WEIGHT_IN_CORR_WEIGHTS
else:
return weight
"""
SR adjustment
"""
def multiplier_from_relative_SR(relative_SR, avg_correlation, years_of_data):
# Return a multiplier
# 1 implies no adjustment required
ratio = mini_bootstrap_ratio_given_SR_diff(
relative_SR, avg_correlation, years_of_data
)
return ratio
def mini_bootstrap_ratio_given_SR_diff(
SR_diff,
avg_correlation,
years_of_data,
avg_SR=0.5,
std=0.15,
how_many_assets=2,
p_step=0.01,
):
"""
Do a parametric bootstrap of portfolio weights to tell you what the ratio should be between an asset which
has a higher backtested SR (by SR_diff) versus another asset(s) with average Sharpe Ratio (avg_SR)
All assets are assumed to have same standard deviation and correlation
:param SR_diff: Difference in performance in Sharpe Ratio (SR) units between one asset and the rest
:param avg_correlation: Average correlation across portfolio
:param years_of_data: How many years of data do you have (can be float for partial years)
:param avg_SR: Should be realistic for your type of trading
:param std: Standard deviation (doesn't affect results, just a scaling parameter)
:param how_many_assets: How many assets in the imaginary portfolio
:param p_step: Step size to go through in the CDF of the mean estimate
:return: float, ratio of weight of asset with different SR to 1/n weight
"""
dist_points = np.arange(
p_step,
stop=(
1 -
p_step) +
0.00000001,
step=p_step)
list_of_weights = [
weights_given_SR_diff(
SR_diff,
avg_correlation,
confidence_interval,
years_of_data,
avg_SR=avg_SR,
std=std,
how_many_assets=how_many_assets,
)
for confidence_interval in dist_points
]
array_of_weights = np.array(list_of_weights)
average_weights = np.nanmean(array_of_weights, axis=0)
ratio_of_weights = weight_ratio(average_weights)
if np.sign(ratio_of_weights - 1.0) != np.sign(SR_diff):
# This shouldn't happen, and only occurs because weight distributions
# get curtailed at zero
return 1.0
return ratio_of_weights
def weight_ratio(weights):
"""
Return the ratio of weight of first asset to other weights
:param weights:
:return: float
"""
one_over_N_weight = 1.0 / len(weights)
weight_first_asset = weights[0]
return weight_first_asset / one_over_N_weight
def weights_given_SR_diff(
SR_diff,
avg_correlation,
confidence_interval,
years_of_data,
avg_SR=0.5,
std=0.15,
how_many_assets=2,
):
"""
Return the ratio of weight to 1/N weight for an asset with unusual SR
:param SR_diff: Difference between the SR and the average SR. 0.0 indicates same as average
:param avg_correlation: Average correlation amongst assets
:param years_of_data: How long has this been going on
:param avg_SR: Average SR to use for other asset
:param confidence_interval: How confident are we about our mean estimate (i.e. cdf point)
:param how_many_assets: .... are we optimising over (I only consider 2, but let's keep it general)
:param std: Standard deviation to use
:return: Ratio of weight, where 1.0 means no difference
"""
average_mean = avg_SR * std
asset1_mean = (SR_diff + avg_SR) * std
mean_difference = asset1_mean - average_mean
# Work out what the mean is with appropriate confidence
confident_mean_difference = calculate_confident_mean_difference(
std, years_of_data, mean_difference, confidence_interval, avg_correlation)
confident_asset1_mean = confident_mean_difference + average_mean
mean_list = [confident_asset1_mean] + \
[average_mean] * (how_many_assets - 1)
weights = optimise_using_correlation(mean_list, avg_correlation, std)
return list(weights)
def optimise_using_correlation(mean_list, avg_correlation, std):
corr_matrix = boring_corr_matrix(len(mean_list), offdiag=avg_correlation)
stdev_list = np.full(len(mean_list), std)
sigma = sigma_from_corr_and_std(stdev_list, corr_matrix)
return optimise(sigma, mean_list)
def boring_corr_matrix(size, offdiag=0.99, diag=1.0):
"""
Create a boring correlation matrix
:param size: dimensions
:param offdiag: value to put in off diagonal
:param diag: value to put in diagonal
:return: np.array, 2 dimensions, size x size
"""
size_index = range(size)
def _od(i, j, offdiag, diag):
if i == j:
return diag
else:
return offdiag
m = [[_od(i, j, offdiag, diag) for i in size_index] for j in size_index]
m = np.array(m)
return m
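# Example output (for illustration):
#     boring_corr_matrix(3, offdiag=0.5) ->
#     [[1.0, 0.5, 0.5],
#      [0.5, 1.0, 0.5],
#      [0.5, 0.5, 1.0]]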
def calculate_confident_mean_difference(
std, years_of_data, mean_difference, confidence_interval, avg_correlation
):
omega_difference = calculate_omega_difference(
std, years_of_data, avg_correlation)
confident_mean_difference = stats.norm(
mean_difference, omega_difference).ppf(confidence_interval)
return confident_mean_difference
def calculate_omega_difference(std, years_of_data, avg_correlation):
omega_one_asset = std / (years_of_data) ** 0.5
omega_variance_difference = 2 * \
(omega_one_asset ** 2) * (1 - avg_correlation)
omega_difference = omega_variance_difference ** 0.5
return omega_difference
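# Worked numbers (illustration only): with std=0.15, 10 years of data and
# avg_correlation=0.7,
#     omega_one_asset           = 0.15 / 10**0.5        ~ 0.0474
#     omega_variance_difference = 2 * 0.0474**2 * 0.3   ~ 0.00135
#     omega_difference          ~ 0.0367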
def adjust_weights_for_SR(weights, SR_list, years_of_data, avg_correlation):
"""
Adjust weights according to heuristic method
:param weights: List of float, starting weights
:param SR_list: np.array of Sharpe Ratios
:param years_of_data: float
:return: list of adjusted weights
"""
assert len(weights) == len(SR_list)
avg_SR = np.nanmean(SR_list)
relative_SR_list = SR_list - avg_SR
multipliers = [
float(multiplier_from_relative_SR(relative_SR, avg_correlation, years_of_data))
for relative_SR in relative_SR_list
]
new_weights = list(np.array(weights) * np.array(multipliers))
norm_new_weights = norm_weights(new_weights)
return norm_new_weights
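# Sketch (toy numbers): tilt equal weights towards the asset with the higher
# backtested Sharpe Ratio, given 10 years of data and average correlation 0.5.
#     new_w = adjust_weights_for_SR([0.5, 0.5], np.array([1.0, 0.5]), 10, 0.5)
#     # new_w sums to 1.0 and new_w[0] > 0.5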
class Portfolio:
"""
Portfolios; what do they contain: a list of instruments, return characteristics, [vol weights], [cash weights]
can contain sub portfolios
they are initially created with some returns
"""
def __init__(
self,
instrument_returns,
allow_leverage=False,
risk_target=NO_RISK_TARGET,
use_SR_estimates=True,
top_level_weights=NO_TOP_LEVEL_WEIGHTS,
log=print,
):
"""
:param instrument_returns: A pandas data frame labelled with instrument names, containing weekly instrument_returns
:param allow_leverage: bool. Ignored if NO_RISK_TARGET
:param risk_target: (optionally) float, annual standard deviation estimate
:param use_SR_estimates: bool
:param top_level_weights: (optionally) pass a list, same length as top level. Used for partitioning to hit risk target.
"""
instrument_returns = self._clean_instruments_remove_missing(
instrument_returns)
self.instrument_returns = instrument_returns
self.instruments = list(instrument_returns.columns)
self.corr_matrix = calc_correlation(instrument_returns)
self.vol_vector = np.array(
instrument_returns.std() * (WEEKS_IN_YEAR ** 0.5))
self.returns_vector = np.array(
instrument_returns.mean() * WEEKS_IN_YEAR)
self.sharpe_ratio = self.returns_vector / self.vol_vector
self.years_of_data = minimum_many_years_of_data_in_dataframe(
instrument_returns)
self.allow_leverage = allow_leverage
self.risk_target = risk_target
self.use_SR_estimates = use_SR_estimates
self.top_level_weights = top_level_weights
self.log = log
def __repr__(self):
return "Portfolio with %d instruments" % len(self.instruments)
def _missing_data_instruments(self, instrument_returns, min_periods=2):
"""
This will only affect top level portfolios
:return: list of instruments without enough data for correlation estimate
"""
instrument_returns[instrument_returns == 0.0] = np.nan
missing_values = instrument_returns.isna().sum()
total_data_length = len(instrument_returns)
missing_instruments = [
instrument for instrument,
missing_value_this_instrument in zip(
instrument_returns.columns,
missing_values) if (
total_data_length -
missing_value_this_instrument) < min_periods]
return missing_instruments
def _clean_instruments_remove_missing(self, instrument_returns):
"""
:return: pd.DataFrame with only valid instruments left in
"""
all_instruments = instrument_returns.columns
missing_instruments = self._missing_data_instruments(
instrument_returns)
valid_instruments = [
x for x in all_instruments if x not in missing_instruments]
self.all_instruments = all_instruments
self.missing_instruments = missing_instruments
self.valid_instruments = valid_instruments
return instrument_returns[valid_instruments]
def _cluster_breakdown(self) -> list:
"""
Creates clusters from the portfolio (doesn't create sub portfolios, but tells you which ones to make)
Credit to this notebook: https://github.com/TheLoneNut/CorrelationMatrixClustering/blob/master/CorrelationMatrixClustering.ipynb
:return: list of int same length as instruments
"""
corr_matrix = self.corr_matrix.values
ind = cluster_correlation_matrix(corr_matrix, max_cluster_size=MAX_CLUSTER_SIZE)
return ind
def _cluster_breakdown_using_risk_partition(self):
"""
Creates clusters, using a risk partitioning method
:return: list of int, same length as instruments
"""
risk_target = self.risk_target
self.log(
"Partioning into two groups to hit risk target of %f" %
risk_target)
assert risk_target is not NO_RISK_TARGET
vol_vector = self.vol_vector
count_is_higher_risk = sum(
[instrument_vol > risk_target for instrument_vol in vol_vector]
)
if count_is_higher_risk == 0:
raise Exception(
"Risk target greater than vol of any instrument: will be impossible to hit risk target"
)
if count_is_higher_risk < (
len(self.instruments) * WARN_ON_SUBPORTFOLIO_SIZE):
self.log(
"Not many instruments have risk higher than target; portfolio will be concentrated to hit risk target"
)
def _cluster_id(instrument_vol, risk_target):
# hard coded do not change; high vol is second group
if instrument_vol > risk_target:
return 2
else:
return 1
cluster_list = [
_cluster_id(
instrument_vol,
risk_target) for instrument_vol in vol_vector]
return cluster_list
def _create_single_subportfolio(self, instrument_list):
"""
Create a single sub portfolio object
:param instrument_list: a subset of the instruments in self.instruments
:return: a new Portfolio object
"""
sub_portfolio_returns = self.instrument_returns[instrument_list]
# IMPORTANT NOTE: Sub portfolios don't inherit risk targets or
# leverage... that is only applied at top level
sub_portfolio = Portfolio(
sub_portfolio_returns, use_SR_estimates=self.use_SR_estimates
)
return sub_portfolio
def _create_child_subportfolios(self):
"""
Create sub portfolios. This doesn't create the entire 'tree', just the level below us (our children)
:return: a list of new portfolio objects (also modifies self.sub_portfolios)
"""
# get clusters
if len(self.instruments) <= MAX_CLUSTER_SIZE:
return NO_SUB_PORTFOLIOS
if self._require_partioned_portfolio():
# Break into two groups to hit a risk target
self.log("Applying partition to hit risk target")
cluster_list = self._cluster_breakdown_using_risk_partition()
else:
self.log("Natural top level grouping used")
cluster_list = self._cluster_breakdown()
unique_clusters = list(set(cluster_list))
instruments_by_cluster = [
[
self.instruments[idx]
for idx, i in enumerate(cluster_list)
if i == cluster_id
]
for cluster_id in unique_clusters
]
sub_portfolios = [
self._create_single_subportfolio(instruments_for_this_cluster)
for instruments_for_this_cluster in instruments_by_cluster
]
return sub_portfolios
def _require_partioned_portfolio(self):
"""
If risk_target set and no leverage allowed will be True,
OR if top level weights are passed
otherwise False
:return: bool
"""
if self.top_level_weights is not NO_TOP_LEVEL_WEIGHTS:
# if top level weights are passed we need to partition
return True
elif (self.risk_target is not NO_RISK_TARGET) and (not self.allow_leverage):
# if a risk target is set, but also no leverage allowed, we need to
# partition
return True
return False
def _create_all_subportfolios(self):
"""
Decluster the entire portfolio into a tree of subportfolios
:return: None [populates self.subportfolios] or NO_SUB_PORTFOLIOS
"""
# Create the first level of sub portfolios underneath us
sub_portfolios = self._create_child_subportfolios()
if sub_portfolios is NO_SUB_PORTFOLIOS:
# nothing to do
return NO_SUB_PORTFOLIOS
# Create the rest of the tree
for single_sub_portfolio in sub_portfolios:
# This will create all nested portfolios
single_sub_portfolio._create_all_subportfolios()
return sub_portfolios
def show_subportfolio_tree(self, prefix=""):
"""
Display the sub portfolio tree
:return: None
"""
descrlist = []
if self.sub_portfolios is NO_SUB_PORTFOLIOS:
descrlist = ["%s Contains %s" % (prefix, str(self.instruments))]
return descrlist
descrlist.append("%s Contains %d sub portfolios" %
(prefix, len(self.sub_portfolios)))
for idx, sub_portfolio in enumerate(self.sub_portfolios):
descrlist.append(
sub_portfolio.show_subportfolio_tree(
prefix="%s[%d]" %
(prefix, idx)))
return descrlist
def _diags_as_dataframe(self):
"""
:return: A diagobject of labelled dataframes showing how the portfolio weights were built up
"""
diag = diagobject()
# not used - make sure everything is available
vw = self.volatility_weights
if self.sub_portfolios is NO_SUB_PORTFOLIOS:
description = "Portfolio containing %s instruments " % (
str(self.instruments)
)
diag.description = description
vol_weights = self.volatility_weights
raw_weights = self.raw_weights
SR = self.sharpe_ratio
diagmatrix = pd.DataFrame(
[raw_weights, vol_weights, list(SR)],
columns=self.instruments,
index=["Raw vol (no SR adj)", "Vol (with SR adj)", "Sharpe Ratio"],
)
diag.calcs = diagmatrix
diag.cash = "No cash calculated"
diag.aggregate = "Not an aggregate portfolio"
return diag
description = "Portfolio containing %d sub portfolios" % len(
self.sub_portfolios
)
diag.description = description
# do instrument level
dm_by_instrument_list = self.dm_by_instrument_list
instrument_vol_weight_in_sub_list = self.instrument_vol_weight_in_sub_list
sub_portfolio_vol_weight_list = self.sub_portfolio_vol_weight_list
vol_weights = self.volatility_weights
diagmatrix = pd.DataFrame(
[
instrument_vol_weight_in_sub_list,
sub_portfolio_vol_weight_list,
dm_by_instrument_list,
vol_weights,
],
columns=self.instruments,
index=[
"Vol wt in group",
"Vol wt. of group",
"Div mult of group",
"Vol wt.",
],
)
diag.calcs = diagmatrix
# do aggregate next
diag.aggregate = diagobject()
diag.aggregate.description = description + " aggregate"
vol_weights = self.aggregate_portfolio.volatility_weights
raw_weights = self.aggregate_portfolio.raw_weights
div_mult = [
sub_portfolio.div_mult for sub_portfolio in self.sub_portfolios]
sharpe_ratios = list(self.aggregate_portfolio.sharpe_ratio)
# unlabelled, sub portfolios don't get names
diagmatrix = pd.DataFrame(
[raw_weights, vol_weights, sharpe_ratios, div_mult],
index=[
"Raw vol (no SR adj or DM)",
"Vol (with SR adj no DM)",
"SR",
"Div mult",
],
)
diag.aggregate.calcs = diagmatrix
# do cash
diag.cash = diagobject()
description = "Portfolio containing %d instruments (cash calculations)" % len(
self.instruments)
diag.cash.description = description
vol_weights = self.volatility_weights
cash_weights = self.cash_weights
vol_vector = list(self.vol_vector)
diagmatrix = pd.DataFrame(
[vol_weights, vol_vector, cash_weights],
columns=self.instruments,
index=["Vol weights", "Std.", "Cash weights"],
)
diag.cash.calcs = diagmatrix
return diag
def _calculate_weights_standalone_portfolio(self):
"""
For a standalone portfolio, calculates volatility weights
        Uses the correlation uncertainty method (via get_weights_using_uncertainty_method)
:return: list of weights
"""
assert len(self.instruments) <= MAX_CLUSTER_SIZE
assert self.sub_portfolios is NO_SUB_PORTFOLIOS
raw_weights = get_weights_using_uncertainty_method(
self.corr_matrix.values, len(self.instrument_returns.index))
self.raw_weights = raw_weights
use_SR_estimates = self.use_SR_estimates
if use_SR_estimates:
SR_list = self.sharpe_ratio
years_of_data = self.years_of_data
avg_correlation = get_avg_corr(self.corr_matrix.values)
adjusted_weights = adjust_weights_for_SR(
raw_weights, SR_list, years_of_data, avg_correlation
)
else:
adjusted_weights = raw_weights
return adjusted_weights
def _calculate_portfolio_returns(self):
"""
If we have some weights, calculate the returns of the entire portfolio
Needs cash weights
:return: pd.Series of returns
"""
cash_weights = self.cash_weights
instrument_returns = self.instrument_returns
cash_weights_as_df = pd.DataFrame(
[cash_weights] * len(instrument_returns.index), instrument_returns.index
)
cash_weights_as_df.columns = instrument_returns.columns
portfolio_returns_df = cash_weights_as_df * instrument_returns
portfolio_returns = portfolio_returns_df.sum(axis=1)
return portfolio_returns
def _calculate_portfolio_returns_std(self):
return self.portfolio_returns.std() * (WEEKS_IN_YEAR ** 0.5)
def _calculate_diversification_mult(self):
"""
Calculates the diversification multiplier for a portfolio
:return: float
"""
corr_matrix = self.corr_matrix.values
vol_weights = np.array(self.volatility_weights)
div_mult = 1.0 / (
(np.dot(np.dot(vol_weights, corr_matrix), vol_weights.transpose())) ** 0.5
)
return div_mult
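    # Worked illustration (numbers assumed): two assets with vol weights
    # [0.5, 0.5] and zero correlation give w'Cw = 0.5, so the diversification
    # multiplier is 1 / sqrt(0.5) ~= 1.41; with correlation 1.0 it is exactly 1.0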
def _calculate_sub_portfolio_returns(self):
"""
Return a matrix of returns with sub portfolios each representing a single asset
:return: pd.DataFrame
"""
assert self.sub_portfolios is not NO_SUB_PORTFOLIOS
sub_portfolio_returns = [
sub_portfolio.portfolio_returns for sub_portfolio in self.sub_portfolios]
sub_portfolio_returns = pd.concat(sub_portfolio_returns, axis=1)
return sub_portfolio_returns
def _calculate_weights_aggregated_portfolio(self):
"""
Calculate weights when we have sub portfolios
This is done by pulling in the weights from each sub portfolio, giving weights to each sub portfolio, and then getting the product
:return: list of weights
"""
sub_portfolio_returns = self._calculate_sub_portfolio_returns()
# create another Portfolio object made up of the sub portfolios
aggregate_portfolio = Portfolio(
sub_portfolio_returns, use_SR_estimates=self.use_SR_estimates
)
# store to look at later if you want
self.aggregate_portfolio = aggregate_portfolio
# calculate the weights- these will be the weight on each sub portfolio
if self.top_level_weights is NO_TOP_LEVEL_WEIGHTS:
# calculate the weights in the normal way
aggregate_weights = aggregate_portfolio.volatility_weights
raw_weights = aggregate_portfolio.raw_weights
else:
# override with top_level_weights - used when risk targeting
try:
assert len(self.top_level_weights) == len(
aggregate_portfolio.instruments
)
except BaseException:
raise Exception(
"Top level weights length %d is different from number of top level groups %d" %
                (len(self.top_level_weights),
                 len(self.aggregate_portfolio.instruments)))
aggregate_weights = self.top_level_weights
raw_weights = aggregate_weights
# calculate the product of div_mult, aggregate weights and sub
# portfolio weights, return as list
vol_weights = []
dm_by_instrument_list = []
instrument_vol_weight_in_sub_list = []
sub_portfolio_vol_weight_list = []
for instrument_code in self.instruments:
weight = None
for sub_portfolio, sub_weight in zip(
self.sub_portfolios, aggregate_weights
):
if instrument_code in sub_portfolio.instruments:
if weight is not None:
raise Exception(
"Instrument %s in multiple sub portfolios" %
instrument_code)
# A weight is the product of: the diversification multiplier for the subportfolio it comes from,
# the weight of that instrument within that subportfolio, and
# the weight of the subportfolio within the larger
# portfolio
div_mult = sub_portfolio.div_mult
instrument_idx = sub_portfolio.instruments.index(
instrument_code)
instrument_weight = sub_portfolio.volatility_weights[instrument_idx]
weight = div_mult * instrument_weight * sub_weight
# useful diagnostics
dm_by_instrument_list.append(div_mult)
instrument_vol_weight_in_sub_list.append(instrument_weight)
sub_portfolio_vol_weight_list.append(sub_weight)
if weight is None:
raise Exception(
"Instrument %s missing from all sub portfolios" %
instrument_code)
vol_weights.append(weight)
vol_weights = norm_weights(vol_weights)
# store diags
self.dm_by_instrument_list = dm_by_instrument_list
self.instrument_vol_weight_in_sub_list = instrument_vol_weight_in_sub_list
self.sub_portfolio_vol_weight_list = sub_portfolio_vol_weight_list
self.raw_weights = raw_weights
return vol_weights
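    # e.g. an instrument with weight 0.5 inside its group, where the group has a
    # diversification multiplier of 1.2 and a weight of 0.4 in the aggregate
    # portfolio, gets 1.2 * 0.5 * 0.4 = 0.24 before the final renormalisation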
def _calculate_volatility_weights(self):
"""
Calculates the volatility weights of the portfolio
        If the portfolio contains sub_portfolios, it will calculate the volatility weights of each sub_portfolio,
        then the weights across the sub_portfolios, and finally the multiplied out instrument weights
        If the portfolio does not contain sub_portfolios, it just calculates the weights directly
:return: volatility weights, also sets self.volatility_weights
"""
if self.sub_portfolios is NO_SUB_PORTFOLIOS:
vol_weights = self._calculate_weights_standalone_portfolio()
else:
vol_weights = self._calculate_weights_aggregated_portfolio()
return vol_weights
def _calculate_cash_weights_no_risk_target(self):
"""
Calculate cash weights without worrying about risk targets
:return: list of cash weights
"""
vol_weights = self.volatility_weights
instrument_std = self.vol_vector
raw_cash_weights = [
vweight / vol for vweight, vol in zip(vol_weights, instrument_std)
]
raw_cash_weights = norm_weights(raw_cash_weights)
return raw_cash_weights
def _calculate_cash_weights_with_risk_target_partitioned(self):
"""
Readjust partitioned top level groups to hit a risk target
(https://qoppac.blogspot.com/2018/12/portfolio-construction-through_7.html)
:return: list of weights
"""
assert self._require_partioned_portfolio()
assert len(self.sub_portfolios) == 2
# hard coded - high vol is second group. Don't change!
high_vol_sub_portfolio = self.sub_portfolios[1]
low_vol_sub_portfolio = self.sub_portfolios[0]
high_vol_std = high_vol_sub_portfolio.portfolio_std
low_vol_std = low_vol_sub_portfolio.portfolio_std
risk_target_std = self.risk_target
assert high_vol_std > low_vol_std
# Now for the correlation estimate
# first create another Portfolio object made up of the sub portfolios
sub_portfolio_returns = self._calculate_sub_portfolio_returns()
assert (
len(sub_portfolio_returns.columns) == 2
) # should be guaranteed by partioning but just to check
correlation = sub_portfolio_returns.corr().values[0][
1
] # only works for groups of 2
# formula from
# https://qoppac.blogspot.com/2018/12/portfolio-construction-through_7.html
a_value = (
(high_vol_std ** 2)
+ (low_vol_std ** 2)
- (2 * high_vol_std * low_vol_std * correlation)
)
b_value = (2 * high_vol_std * low_vol_std * correlation) - 2 * (
low_vol_std ** 2
)
c_value = (low_vol_std ** 2) - (risk_target_std ** 2)
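        # Derivation sketch: with cash weight w on the high vol group and
        # (1 - w) on the low vol group, portfolio variance is
        #   w**2 * high_vol_std**2 + (1 - w)**2 * low_vol_std**2
        #     + 2 * w * (1 - w) * high_vol_std * low_vol_std * correlation
        # Setting this equal to risk_target_std**2 and collecting terms in w
        # gives the quadratic a_value * w**2 + b_value * w + c_value = 0,
        # which is solved for the positive root below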
# standard formula for solving a quadratic
high_cash_weight = (
-b_value + (((b_value ** 2) - (4 * a_value * c_value)) ** 0.5)
) / (2 * a_value)
try:
assert high_cash_weight >= 0.0
except BaseException:
raise Exception(
"Something went wrong; cash weight target on high risk portfolio is negative!"
)
try:
assert high_cash_weight <= 1.0
except BaseException:
raise Exception(
"Can't hit risk target of %f - make it lower or include riskier assets!" %
risk_target_std)
# new_weight is the weight on the HIGH_VOL portfolio
low_cash_weight = 1.0 - high_cash_weight
# These are cash weights; change to a vol weight
high_vol_weight = high_cash_weight * high_vol_std
low_vol_weight = low_cash_weight * low_vol_std
self.log(
"Need to limit low cash group to %f (vol) %f (cash) of portfolio to hit risk target of %f" %
(low_vol_weight, low_cash_weight, risk_target_std))
# Hard coded - high vol is second group
top_level_weights = norm_weights([low_vol_weight, high_vol_weight])
#p.top_level_weights = top_level_weights
# We create an adjusted portfolio with the required top level weights as constraints
# we also need to pass the risk target to get same partitioning
# and use_SR_estimates to guarantee weights are the same
#
adjusted_portfolio = Portfolio(
self.instrument_returns,
use_SR_estimates=self.use_SR_estimates,
top_level_weights=top_level_weights,
risk_target=self.risk_target,
)
return adjusted_portfolio.cash_weights
def _calculate_cash_weights_with_risk_target(self):
"""
Calculate cash weights given a risk target
:return: list of weights
"""
target_std = self.risk_target
self.log("Calculating weights to hit a risk target of %f" % target_std)
# create version without risk target to check natural risk
# note all sub portfolios are like this
natural_portfolio = Portfolio(
self.instrument_returns, risk_target=NO_RISK_TARGET
)
natural_std = natural_portfolio.portfolio_std
natural_cash_weights = natural_portfolio.cash_weights
# store for diagnostics
self.natural_cash_weights = natural_cash_weights
self.natural_std = natural_std
if natural_std > target_std:
# Too much risk
# blend with cash
cash_required = (natural_std - target_std) / natural_std
portfolio_capital_left = 1.0 - cash_required
self.log(
"Too much risk %f of the portfolio will be cash" %
cash_required)
cash_weights = list(
np.array(natural_cash_weights) *
portfolio_capital_left)
# stored as diag
self.cash_required = cash_required
return cash_weights
elif natural_std < target_std:
# Not enough risk
if self.allow_leverage:
# calc leverage
leverage = target_std / natural_std
self.log(
"Not enough risk leverage factor of %f applied" %
leverage)
cash_weights = list(np.array(natural_cash_weights) * leverage)
# stored as diag
self.leverage = leverage
return cash_weights
else:
# no leverage allowed
# need to adjust weights
self.log(
"Not enough risk, no leverage allowed, using partition method")
return self._calculate_cash_weights_with_risk_target_partitioned()
# will only get here if the target and natural std are identical...
# unlikely - but!
return natural_cash_weights
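    # Worked illustration (numbers assumed): with a natural std of 0.20 and a
    # risk target of 0.10, cash_required = (0.20 - 0.10) / 0.20 = 0.5, so every
    # natural cash weight is halved and half the capital sits in cash. With a
    # natural std of 0.05, a target of 0.10 and leverage allowed, each weight
    # is instead scaled up by a leverage factor of 0.10 / 0.05 = 2.0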
def _calculate_cash_weights(self):
"""
Calculate cash weights
Note - this will apply a risk target if required
Note 2 - only top level portfolios have risk targets - sub portfolios don't
:return: list of weights
"""
target_std = self.risk_target
if target_std is NO_RISK_TARGET:
# no risk target, can use natural weights
return self._calculate_cash_weights_no_risk_target()
elif self.top_level_weights is not NO_TOP_LEVEL_WEIGHTS:
# top level weights passed, use natural weights
return self._calculate_cash_weights_no_risk_target()
else:
# need a risk target
return self._calculate_cash_weights_with_risk_target()
"""
    Functions that return weights with any missing instruments added back in
"""
def _weights_with_missing_data(self, original_weights):
"""
:param original_weights:
:return: weights adding back original instruments
"""
original_weights_valid_only = dict(
[
(instrument, weight)
for instrument, weight in zip(self.valid_instruments, original_weights)
]
)
new_weights = []
for instrument in self.all_instruments:
if instrument in self.missing_instruments:
new_weights.append(np.nan)
elif instrument in self.valid_instruments:
new_weights.append(original_weights_valid_only[instrument])
else:
raise Exception("Gone horribly wrong")
return new_weights
def volatility_weights_with_missing_data(self):
"""
:return: vol weights, adding back any missing instruments
"""
vol_weights_valid_only = self.volatility_weights
vol_weights = self._weights_with_missing_data(vol_weights_valid_only)
return vol_weights
def cash_weights_with_missing_data(self):
"""
:return: cash weights, adding back any missing instruments
"""
cash_weights_valid_only = self.cash_weights
cash_weights = self._weights_with_missing_data(cash_weights_valid_only)
return cash_weights
"""
Boilerplate getter functions
"""
@property
def volatility_weights(self):
if hasattr(self, "_volatility_weights"):
return self._volatility_weights
else:
weights_vol = self._calculate_volatility_weights()
self._volatility_weights = weights_vol
return weights_vol
@property
def cash_weights(self):
if hasattr(self, "_cash_weights"):
return self._cash_weights
else:
weights_cash = self._calculate_cash_weights()
self._cash_weights = weights_cash
return weights_cash
@property
def sub_portfolios(self):
if hasattr(self, "_sub_portfolios"):
return self._sub_portfolios
else:
sub_portfolios = self._create_all_subportfolios()
self._sub_portfolios = sub_portfolios
return sub_portfolios
@property
def portfolio_returns(self):
if hasattr(self, "_portfolio_returns"):
return self._portfolio_returns
else:
portfolio_returns = self._calculate_portfolio_returns()
self._portfolio_returns = portfolio_returns
return portfolio_returns
@property
def portfolio_std(self):
if hasattr(self, "_portfolio_returns_std"):
return self._portfolio_returns_std
else:
portfolio_returns_std = self._calculate_portfolio_returns_std()
self._portfolio_returns_std = portfolio_returns_std
return portfolio_returns_std
@property
def div_mult(self):
if hasattr(self, "_div_mult"):
return self._div_mult
else:
div_mult = self._calculate_diversification_mult()
self._div_mult = div_mult
return div_mult
@property
def diags(self):
if hasattr(self, "_diags"):
return self._diags
else:
diags = self._diags_as_dataframe()
self._diags = diags
return diags
def calc_correlation(instrument_returns):
recent_instrument_returns = instrument_returns[-MAX_ROWS_FOR_CORR_ESTIMATION:]
corr = recent_instrument_returns.corr()
return corr
def minimum_many_years_of_data_in_dataframe(data):
years_of_data_dict = how_many_years_of_data_in_dataframe(data)
years_of_data_values = years_of_data_dict.values()
min_years_of_data = min(years_of_data_values)
return min_years_of_data
def how_many_years_of_data_in_dataframe(data):
"""
How many years of non NA data do we have?
Assumes daily timestamp
:param data: pd.DataFrame with labelled columns
    :return: dict of floats, keyed by column name
"""
result_dict = dict(data.apply(how_many_years_of_data_in_pd_series, axis=0))
return result_dict
from syscore.pdutils import pd_readcsv
def how_many_years_of_data_in_pd_series(data_series):
"""
How many years of actual data do we have
Assume daily timestamp which is fairly regular
:param data_series:
:return: float
"""
first_valid_date = data_series.first_valid_index()
last_valid_date = data_series.last_valid_index()
date_difference = last_valid_date - first_valid_date
date_difference_days = date_difference.days
date_difference_years = float(date_difference_days) / CALENDAR_DAYS_IN_YEAR
return date_difference_years
def get_avg_corr(sigma):
"""
>>> sigma=np.array([[1.0,0.0,0.5], [0.0, 1.0, 0.75],[0.5, 0.75, 1.0]])
>>> get_avg_corr(sigma)
0.41666666666666669
>>> sigma=np.array([[1.0,np.nan], [np.nan, 1.0]])
>>> get_avg_corr(sigma)
nan
"""
new_sigma = copy(sigma)
np.fill_diagonal(new_sigma, np.nan)
if np.all(np.isnan(new_sigma)):
return np.nan
avg_corr = np.nanmean(new_sigma)
return avg_corr
def cluster_correlation_matrix(corr_matrix: np.array, max_cluster_size = 3) -> list:
d = sch.distance.pdist(corr_matrix)
L = sch.linkage(d, method="complete")
ind = sch.fcluster(L, max_cluster_size, criterion="maxclust")
ind = list(ind)
return ind
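# Illustrative example (matrix values assumed, not from the original source):
# for a 4 x 4 correlation matrix made of two tightly correlated pairs,
#   corr = np.array([[1.0, 0.9, 0.1, 0.1],
#                    [0.9, 1.0, 0.1, 0.1],
#                    [0.1, 0.1, 1.0, 0.9],
#                    [0.1, 0.1, 0.9, 1.0]])
#   cluster_correlation_matrix(corr, max_cluster_size=2)
# would be expected to return one cluster label per asset, e.g. [1, 1, 2, 2]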
def optimise(sigma, mean_list):
# will replace nans with big negatives
mean_list = fix_mus(mean_list)
# replaces nans with zeros
sigma = fix_sigma(sigma)
mus = np.array(mean_list, ndmin=2).transpose()
number_assets = sigma.shape[1]
start_weights = [1.0 / number_assets] * number_assets
# Constraints - positive weights, adding to 1.0
bounds = [(0.0, 1.0)] * number_assets
cdict = [{"type": "eq", "fun": addem}]
ans = minimize(
neg_SR,
start_weights,
(sigma, mus),
method="SLSQP",
bounds=bounds,
constraints=cdict,
tol=0.00001,
)
# anything that had a nan will now have a zero weight
weights = ans["x"]
# put back the nans
weights = un_fix_weights(mean_list, weights)
return weights
def sigma_from_corr_and_std(stdev_list, corrmatrix):
sigma = np.diag(stdev_list).dot(corrmatrix).dot(np.diag(stdev_list))
return sigma
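# Minimal usage sketch (inputs assumed, not from the original source):
#   sigma = sigma_from_corr_and_std([0.1, 0.2],
#                                   np.array([[1.0, 0.0], [0.0, 1.0]]))
#   optimise(sigma, [0.05, 0.05])
# With equal means this maximises the Sharpe ratio by minimising volatility,
# so the weights should come out at roughly [0.8, 0.2] and sum to 1.0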
def fix_mus(mean_list):
"""
Replace nans with unfeasibly large negatives
result will be zero weights for these assets
"""
def _fixit(x):
if np.isnan(x):
return FLAG_BAD_RETURN
else:
return x
mean_list = [_fixit(x) for x in mean_list]
return mean_list
def un_fix_weights(mean_list, weights):
"""
When mean has been replaced, use nan weight
"""
def _unfixit(xmean, xweight):
if xmean == FLAG_BAD_RETURN:
return np.nan
else:
return xweight
fixed_weights = [
        _unfixit(xmean, xweight)
        for (xmean, xweight) in zip(mean_list, weights)]
return fixed_weights
def fix_sigma(sigma):
"""
Replace nans with zeros
"""
def _fixit(x):
if np.isnan(x):
return 0.0
else:
return x
sigma = [[_fixit(x) for x in sigma_row] for sigma_row in sigma]
sigma = np.array(sigma)
return sigma
def neg_SR(weights, sigma, mus):
# Returns minus the Sharpe Ratio (as we're minimising)
weights = np.matrix(weights)
estreturn = (weights * mus)[0, 0]
std_dev = variance(weights, sigma) ** 0.5
return -estreturn / std_dev
def addem(weights):
# Used for constraints
return 1.0 - sum(weights)
def variance(weights, sigma):
# returns the variance (NOT standard deviation) given weights and sigma
return (weights * sigma * weights.transpose())[0, 0]
| gpl-3.0 |
midnightradio/gensim | gensim/sklearn_api/tfidf.py | 3 | 6995 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit-learn interface for :class:`~gensim.models.tfidfmodel.TfidfModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.sklearn_api import TfIdfTransformer
>>>
>>> # Transform the word counts inversely to their global frequency using the sklearn interface.
>>> model = TfIdfTransformer(dictionary=common_dictionary)
>>> tfidf_corpus = model.fit_transform(common_corpus)
"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim.models import TfidfModel
import gensim
class TfIdfTransformer(TransformerMixin, BaseEstimator):
"""Base TfIdf module, wraps :class:`~gensim.models.tfidfmodel.TfidfModel`.
For more information see `tf-idf <https://en.wikipedia.org/wiki/Tf%E2%80%93idf>`_.
"""
def __init__(self, id2word=None, dictionary=None, wlocal=gensim.utils.identity,
wglobal=gensim.models.tfidfmodel.df2idf, normalize=True, smartirs="nfc",
pivot=None, slope=0.65):
"""
Parameters
----------
id2word : {dict, :class:`~gensim.corpora.Dictionary`}, optional
Mapping from int id to word token, that was used for converting input data to bag of words format.
dictionary : :class:`~gensim.corpora.Dictionary`, optional
If specified it will be used to directly construct the inverse document frequency mapping.
        wlocal : function, optional
Function for local weighting, default for `wlocal` is :func:`~gensim.utils.identity` which does nothing.
Other options include :func:`math.sqrt`, :func:`math.log1p`, etc.
wglobal : function, optional
Function for global weighting, default is :func:`~gensim.models.tfidfmodel.df2idf`.
normalize : bool, optional
It dictates how the final transformed vectors will be normalized. `normalize=True` means set to unit length
(default); `False` means don't normalize. You can also set `normalize` to your own function that accepts
and returns a sparse vector.
smartirs : str, optional
SMART (System for the Mechanical Analysis and Retrieval of Text) Information Retrieval System,
a mnemonic scheme for denoting tf-idf weighting variants in the vector space model.
The mnemonic for representing a combination of weights takes the form XYZ,
for example 'ntc', 'bpn' and so on, where the letters represents the term weighting of the document vector.
local_letter : str
Term frequency weighing, one of:
* `b` - binary,
* `t` or `n` - raw,
* `a` - augmented,
* `l` - logarithm,
* `d` - double logarithm,
* `L` - log average.
global_letter : str
Document frequency weighting, one of:
* `x` or `n` - none,
* `f` - idf,
* `t` - zero-corrected idf,
* `p` - probabilistic idf.
normalization_letter : str
Document normalization, one of:
* `x` or `n` - none,
* `c` - cosine,
* `u` - pivoted unique,
* `b` - pivoted character length.
Default is `nfc`.
For more info, visit `"Wikipedia" <https://en.wikipedia.org/wiki/SMART_Information_Retrieval_System>`_.
pivot : float, optional
It is the point around which the regular normalization curve is `tilted` to get the new pivoted
normalization curve. In the paper `Amit Singhal, Chris Buckley, Mandar Mitra:
"Pivoted Document Length Normalization" <http://singhal.info/pivoted-dln.pdf>`_ it is the point where the
retrieval and relevance curves intersect.
This parameter along with `slope` is used for pivoted document length normalization.
When `pivot` is None, `smartirs` specifies the pivoted unique document normalization scheme, and either
`corpus` or `dictionary` are specified, then the pivot will be determined automatically. Otherwise, no
pivoted document length normalization is applied.
slope : float, optional
It is the parameter required by pivoted document length normalization which determines the slope to which
the `old normalization` can be tilted. This parameter only works when pivot is defined by user and is not
None.
See Also
--------
~gensim.models.tfidfmodel.TfidfModel : Class that also uses the SMART scheme.
~gensim.models.tfidfmodel.resolve_weights : Function that also uses the SMART scheme.
"""
self.gensim_model = None
self.id2word = id2word
self.dictionary = dictionary
self.wlocal = wlocal
self.wglobal = wglobal
self.normalize = normalize
self.smartirs = smartirs
self.slope = slope
self.pivot = pivot
def fit(self, X, y=None):
"""Fit the model from the given training data.
Parameters
----------
X : iterable of iterable of (int, int)
Input corpus
y : None
Ignored. TF-IDF is an unsupervised model.
Returns
-------
:class:`~gensim.sklearn_api.tfidf.TfIdfTransformer`
The trained model.
"""
self.gensim_model = TfidfModel(
corpus=X, id2word=self.id2word, dictionary=self.dictionary, wlocal=self.wlocal,
wglobal=self.wglobal, normalize=self.normalize, smartirs=self.smartirs,
pivot=self.pivot, slope=self.slope,
)
return self
def transform(self, docs):
"""Get the tf-idf scores for `docs` in a bag-of-words representation.
Parameters
----------
docs: {iterable of list of (int, number)}
Document or corpus in bag-of-words format.
Returns
-------
        iterable of list of (int, float) 2-tuples.
            The tf-idf weighted representation of each input document.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# Is the input a single document?
if isinstance(docs[0], tuple):
docs = [docs] # Yes => convert it to a corpus (of 1 document).
return [self.gensim_model[doc] for doc in docs]
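    # Continuing the module-level example above (illustrative):
    #   model.transform(common_corpus[:2])  # a corpus -> list of tf-idf vectors
    #   model.transform(common_corpus[0])   # a single BoW document is also accepted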
| gpl-3.0 |
LucianU/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
shaunwbell/FOCI_Analysis | ReanalysisRetreival_orig/GOA_Winds/GOA_Winds_NARR_model_prep.py | 1 | 10003 | #!/usr/bin/env
"""
GOA_Winds_NARR_model_prep.py
Retrieve NARR winds for two locations:
GorePoint - 58deg 58min N, 150deg 56min W
and Globec3 59.273701N, 148.9653W
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import general_utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = '[email protected]'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','GLOBEC3', 'Gorept','3hr filtered', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
"---"
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
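# Illustration: the filter spreads a unit impulse over three points, e.g.
#   triangle_smoothing(np.array([0., 0., 1., 0., 0.]))
#   -> [0., 0.25, 0.5, 0.25, 0.]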
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Users/bell/Data_Local/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.2003.nc']
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
station_name = ['Globec3','GorePt']
sta_lat = [59.273701,58.9666666666666667]
sta_long = [148.9653,150.9333333333333333]
#Find NARR nearest point to moorings - haversine formula
# NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI
globec_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
gorept_pt = sphered.nearest_point([sta_lat[1],-1 * sta_long[1]],lat_lon['lat'],lat_lon['lon'], '2d')
globec_modelpt = [lat_lon['lat'][globec_pt[3],globec_pt[4]],lat_lon['lon'][globec_pt[3],globec_pt[4]]]
gorept_modelpt = [lat_lon['lat'][gorept_pt[3],gorept_pt[4]],lat_lon['lon'][gorept_pt[3],gorept_pt[4]]]
print "Globec nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], globec_modelpt[0], globec_modelpt[1])
print "GorePt nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[1], sta_long[1], gorept_modelpt[0], gorept_modelpt[1])
#loop over all requested data
#years = arange(1984,2014,1)
#years = [1984, 1987, 1989, 1991, 1994, 2001, 2002, 2003, 2004, 2005, 2006, 2011, 2013]
years = [1986,]
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3u_f = triangle_smoothing(globec3_data['uwnd'])
goreptu_f = triangle_smoothing(gorept_data['uwnd'])
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3v_f = triangle_smoothing(globec3_data['vwnd'])
goreptv_f = triangle_smoothing(gorept_data['vwnd'])
#convert to EPIC time
pydate = date2pydate(globec3_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_globec_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
        write2epic( outfile, station_name[0], [epic_time, epic_time1], globec_modelpt, [globec3u_f, globec3v_f])
outfile = 'data/NARR_gorept_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
        write2epic( outfile, station_name[1], [epic_time, epic_time1], gorept_modelpt, [goreptu_f, goreptv_f])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=62,llcrnrlon=-155,urcrnrlon=-145, lat_ts=45)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0], -1. * sta_long[1]],sta_lat)
x_close, y_close = m([globec_modelpt[1],gorept_modelpt[1]], [globec_modelpt[0],gorept_modelpt[0]])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyle='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,62,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-155,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/Gorepoint_region.png', bbox_inches='tight', dpi = (100))
plt.close()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=62,llcrnrlon=-155,urcrnrlon=-145, lat_ts=45)
| mit |
ch3ll0v3k/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
image of size 512 x 512 pixels, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
paris-saclay-cds/ramp-workflow | rampwf/tests/kits/mars_craters/problem.py | 1 | 3667 | import os
import numpy as np
import pandas as pd
import rampwf as rw
problem_title = 'Mars craters detection and classification'
# A type (class) which will be used to create wrapper objects for y_pred
Predictions = rw.prediction_types.make_detection()
# An object implementing the workflow
workflow = rw.workflows.ObjectDetector()
# The overlap between adjacent patches is 56 pixels
# The scoring region is chosen so that despite the overlap,
# no crater is scored twice, hence the boundaries of
# 28 = 56 / 2 and 196 = 224 - 56 / 2
minipatch = [28, 196, 28, 196]
score_types = [
rw.score_types.OSPA(minipatch=minipatch),
rw.score_types.SCP(shape=(224, 224), minipatch=minipatch),
rw.score_types.DetectionAveragePrecision(name='ap'),
rw.score_types.DetectionPrecision(
name='prec(0)', iou_threshold=0.0, minipatch=minipatch),
rw.score_types.DetectionPrecision(
name='prec(0.5)', iou_threshold=0.5, minipatch=minipatch),
rw.score_types.DetectionPrecision(
name='prec(0.9)', iou_threshold=0.9, minipatch=minipatch),
rw.score_types.DetectionRecall(
name='rec(0)', iou_threshold=0.0, minipatch=minipatch),
rw.score_types.DetectionRecall(
name='rec(0.5)', iou_threshold=0.5, minipatch=minipatch),
rw.score_types.DetectionRecall(
name='rec(0.9)', iou_threshold=0.9, minipatch=minipatch),
rw.score_types.MADCenter(name='madc', minipatch=minipatch),
rw.score_types.MADRadius(name='madr', minipatch=minipatch)
]
def get_cv(X, y):
# 3 quadrangles for training have not exactly the same size,
# but for simplicity just cut in 3
# for each fold use one quadrangle as test set, the other two as training
n_tot = len(X)
n1 = n_tot // 3
n2 = n1 * 2
return [(np.r_[0:n2], np.r_[n2:n_tot]),
(np.r_[n1:n_tot], np.r_[0:n1]),
(np.r_[0:n1, n2:n_tot], np.r_[n1:n2])]
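# Illustration (assuming n_tot = 9, so n1 = 3 and n2 = 6) the three folds are:
#   train [0..5] / test [6..8], train [3..8] / test [0..2],
#   train [0..2, 6..8] / test [3..5]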
def _read_data(path, typ):
"""
Read and process data and labels.
Parameters
----------
path : path to directory that has 'data' subdir
typ : {'train', 'test'}
Returns
-------
X, y data
"""
suffix = '_mini'
try:
data_path = os.path.join(path, 'data',
'data_{0}{1}.npy'.format(typ, suffix))
src = np.load(data_path, mmap_mode='r')
labels_path = os.path.join(path, 'data',
'labels_{0}{1}.csv'.format(typ, suffix))
labels = pd.read_csv(labels_path)
except IOError:
raise IOError("'data/data_{0}.npy' and 'data/labels_{0}.csv' are not "
"found. Ensure you ran 'python download_data.py' to "
"obtain the train/test data".format(typ))
# convert the dataframe with crater positions to list of
# list of (x, y, radius) tuples (list of arrays of shape (n, 3) with n
# true craters on an image
# determine locations of craters for each patch in the labels array
n_true_patches = labels.groupby('i').size().reindex(
range(src.shape[0]), fill_value=0).values
# make cumulative sum to obtain start/stop to slice the labels
n_cum = np.array(n_true_patches).cumsum()
n_cum = np.insert(n_cum, 0, 0)
labels_array = labels[['row_p', 'col_p', 'radius_p']].values
y = [[tuple(x) for x in labels_array[i:j]]
for i, j in zip(n_cum[:-1], n_cum[1:])]
# convert list to object array of lists
y_array = np.empty(len(y), dtype=object)
y_array[:] = y
return src, y_array
def get_test_data(path='.'):
return _read_data(path, 'test')
def get_train_data(path='.'):
return _read_data(path, 'train')
| bsd-3-clause |
errantlinguist/tangrams-analysis | tangrams_analysis/cross_validation.py | 1 | 5008 | """
Functionalities for cross-validating words-as-classifiers reference resolution (not yet finished!).
"""
__author__ = "Todd Shore <[email protected]>"
__copyright__ = "Copyright 2017 Todd Shore"
__license__ = "Apache License, Version 2.0"
import csv
import itertools
from collections import namedtuple
from typing import Callable, Iterable, Iterator, Mapping, Optional, Tuple
import pandas as pd
from . import game_utterances
from . import iristk
from . import session_data as sd
CATEGORICAL_VAR_COL_NAMES = (
game_utterances.EventColumn.ENTITY_SHAPE.value, game_utterances.EventColumn.EVENT_SUBMITTER.value)
# NOTE: For some reason, "pandas.get_dummies(..., columns=[col_name_1,...])" works with list objects but not with tuples
CATEGORICAL_DEPENDENT_VAR_COL_NAMES = [game_utterances.EventColumn.ENTITY_SHAPE.value]
assert all(col_name in CATEGORICAL_VAR_COL_NAMES for col_name in CATEGORICAL_DEPENDENT_VAR_COL_NAMES)
RESULTS_FILE_ENCODING = "utf-8"
__RESULTS_FILE_DTYPES = {"Cleaning.DISFLUENCIES": bool, "Cleaning.DUPLICATES": bool, "Cleaning.FILLERS": bool}
CrossValidationDataFrames = namedtuple("CrossValidationDataFrames", ("training", "testing"))
class CachingSessionDataFrameFactory(object):
def __init__(self, session_data_frame_factory: Optional[Callable[[sd.SessionData], pd.DataFrame]] = None):
self.session_data_frame_factory = game_utterances.SessionGameRoundUtteranceSequenceFactory() if session_data_frame_factory is None else session_data_frame_factory
self.cache = {}
def __call__(self, infile: str, session: sd.SessionData) -> pd.DataFrame:
try:
result = self.cache[infile]
except KeyError:
result = self.session_data_frame_factory(session)
result[game_utterances.EventColumn.DYAD_ID.value] = infile
self.cache[infile] = result
return result
class CrossValidationData(object):
def __init__(self, testing_data: Tuple[str, sd.SessionData], training_data: Mapping[str, sd.SessionData]):
self.testing_data = testing_data
self.training_data = training_data
@property
def __key(self):
return self.testing_data, self.training_data
def __eq__(self, other):
return (self is other or (isinstance(other, type(self))
and self.__key == other.__key))
def __hash__(self):
return hash(self.__key)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return self.__class__.__name__ + str(self.__dict__)
class CrossValidationDataFrameFactory(object):
@staticmethod
def __categoricize_data(training_feature_df: pd.DataFrame, testing_feature_df: pd.DataFrame):
for col_name in CATEGORICAL_VAR_COL_NAMES:
unique_values = tuple(sorted(frozenset(
itertools.chain(training_feature_df[col_name].unique(), testing_feature_df[col_name].unique()))))
training_feature_df[col_name] = pd.Categorical(training_feature_df[col_name], categories=unique_values,
ordered=False)
testing_feature_df[col_name] = pd.Categorical(testing_feature_df[col_name], categories=unique_values,
ordered=False)
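    # Note: aligning the category sets across training and testing up front
    # means the later pd.get_dummies call emits the same dummy columns for both
    # frames, even if a category value happens to be absent from one of them.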
def __init__(self, session_data_frame_factory: Optional[Callable[[str, sd.SessionData], pd.DataFrame]]):
self.session_data_frame_factory = CachingSessionDataFrameFactory() if session_data_frame_factory is None else session_data_frame_factory
def __call__(self, named_session_data=Iterable[Tuple[str, sd.SessionData]]) -> Iterator[CrossValidationDataFrames]:
for testing_session_name, testing_session_data in named_session_data:
training_sessions = dict(
(infile, training_session_data) for (infile, training_session_data) in named_session_data if
testing_session_data != training_session_data)
cross_validation_set = CrossValidationData((testing_session_name, testing_session_data),
training_sessions)
yield self.__create_cross_validation_data_frames(cross_validation_set)
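    # In effect this performs leave-one-session-out cross-validation: each
    # session in turn is used as the test set while all remaining sessions
    # form the training set.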
def __create_cross_validation_data_frames(self,
cross_validation_data: CrossValidationData) -> CrossValidationDataFrames:
training_feature_df = pd.concat(self.session_data_frame_factory(infile, session) for (infile, session) in
cross_validation_data.training_data.items())
testing_feature_df = self.session_data_frame_factory(*cross_validation_data.testing_data)
# noinspection PyTypeChecker
self.__categoricize_data(training_feature_df, testing_feature_df)
dummified_training_feature_df = pd.get_dummies(training_feature_df, columns=CATEGORICAL_DEPENDENT_VAR_COL_NAMES)
dummified_testing_feature_df = pd.get_dummies(testing_feature_df, columns=CATEGORICAL_DEPENDENT_VAR_COL_NAMES)
return CrossValidationDataFrames(dummified_training_feature_df, dummified_testing_feature_df)
def read_results_file(inpath: str) -> pd.DataFrame:
return pd.read_csv(inpath, sep=csv.excel_tab.delimiter, dialect=csv.excel_tab, float_precision="round_trip",
encoding=RESULTS_FILE_ENCODING, memory_map=True, parse_dates=["TIME", "EVENT_TIME"],
date_parser=iristk.parse_timestamp,
dtype=__RESULTS_FILE_DTYPES)
| apache-2.0 |
wkfwkf/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause |
justthetips/PerformanceAnalytics | performanceanalytics/drawdowns.py | 1 | 6741 | # MIT License
# Copyright (c) 2017 Jacob Bourne
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pandas as pd
def find_drawdowns(series: pd.Series):
"""
find the drawdowns of a series, returns a list of drawdown holder objects
:param series: the series
:return: list of drawdown holders
"""
if not isinstance(series, pd.Series):
raise ValueError("Only works for Pandas Series, you passed in {}".format(type(series)))
# first turn the series into the cumprod
dd_series = (1 + series).cumprod()
# now walk through the time series finding the dd
prior_max = dd_series.iloc[0]
prior_min = prior_max
in_drawdown = False
current_dd = None
dd_list = []
for dt, value in dd_series.iteritems():
# if the value is lower than the previous we are in a drawdown
if value < prior_max:
# if we are not already in a drawdown we are now
if not in_drawdown:
in_drawdown = True
dd = DrawdownHolder(dt)
dd.max_value = prior_max
dd.min_value = value
dd.trough_date = dt
prior_min = value
current_dd = dd
elif value < prior_min:
# if we are in a drawdown, check to see if we are at the min
current_dd.min_value = value
current_dd.trough_date = dt
prior_min = value
else:
if in_drawdown:
# the drawdown is over
current_dd.end_date = dt
prior_max = value
in_drawdown = False
dd_list.append(current_dd)
else:
prior_max = value
return dd_list
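# Minimal usage sketch (illustrative values, not from the original source):
#   rets = pd.Series([0.01, -0.02, -0.01, 0.03, 0.01],
#                    index=pd.date_range('2017-01-31', periods=5, freq='M'))
#   for dd in find_drawdowns(rets):
#       print(dd.start_date, dd.trough_date, dd.end_date, dd.depth)
# Note that only drawdowns which have recovered by the end of the series are
# returned, since a DrawdownHolder is only appended once its end date is set.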
class DrawdownHolder(object):
"""
Custom class to hold all the information about a drawdown
"""
def __init__(self, dd_start):
"""
initialization, must pass in the start date
:param dd_start:
"""
        self._dd_start = dd_start
        # initialise the remaining fields to None so the "is None" guards in
        # the properties below behave as intended before the setters are called
        self._trough_date = None
        self._end_date = None
        self._max_value = None
        self._min_value = None
@property
def start_date(self):
"""
the start date
:return: the start date of the drawdown
"""
return self._dd_start
@property
def trough_date(self):
"""
the date of the trough of the drawdown
:return: the date
"""
return self._trough_date
@trough_date.setter
def trough_date(self, td):
"""
set the trough date
:param td: the date
:return:
"""
self._trough_date = td
@property
def end_date(self):
"""
the end date of the drawdown
:return: the date
"""
return self._end_date
@end_date.setter
def end_date(self, ed):
"""
the end date of the drawdown
:param ed: the date
:return:
"""
self._end_date = ed
@property
def max_value(self):
"""
the max value before the drawdown began
:return: the value
"""
return self._max_value
@max_value.setter
def max_value(self, mv):
"""
the max value before the drawdown began
:param mv: the value
:return:
"""
self._max_value = mv
@property
def min_value(self):
"""
the min value of the drawdown
:return: the value
"""
return self._min_value
@min_value.setter
def min_value(self, mv):
"""
the min value of the drawdown
:param mv: the value
:return:
"""
self._min_value = mv
@property
def depth(self):
"""
the depth of the drawdown (min / max) - 1
:return: the depth
"""
if (self.min_value is None) or (self.max_value is None):
raise AttributeError("Cannot be called until min value and max value are set")
return (self.min_value / self.max_value) - 1
@property
def length(self):
"""
the length of the drawdown in days
:return: the length
"""
if self.end_date is None:
raise AttributeError("Cannot be called until the end date is set")
return (self.end_date - self.start_date).days
@property
def recovery(self):
"""
the length of the recovery in days
:return: the length
"""
if (self.trough_date is None) or (self.end_date is None):
raise AttributeError("Cannot be called until trough date and end date are set")
return (self.end_date - self.trough_date).days
@property
def to_trough(self):
"""
the length from the start to the trough in days
:return: the length
"""
if self.trough_date is None:
raise AttributeError("Cannot be called until trough date is set")
return (self.trough_date - self.start_date).days
def __repr__(self):
return '{}: {} {} {}'.format(self.__class__.__name__,
self.start_date,
self.end_date, self.depth)
def __lt__(self, other):
return self.depth < other.depth
def __le__(self, other):
return self.depth <= other.depth
def __gt__(self, other):
return self.depth > other.depth
def __ge__(self, other):
return self.depth >= other.depth
def __eq__(self, other):
return self.start_date == other.start_date and self.trough_date == other.trough_date and self.end_date == other.end_date
def __ne__(self, other):
return self.start_date != other.start_date or self.trough_date != other.trough_date or self.end_date != other.end_date
| mit |
bartosh/zipline | zipline/testing/predicates.py | 1 | 15559 | from contextlib import contextmanager
import datetime
from functools import partial
import inspect
import re
from nose.tools import ( # noqa
assert_almost_equal,
assert_almost_equals,
assert_dict_contains_subset,
assert_false,
assert_greater,
assert_greater_equal,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_less,
assert_less_equal,
assert_multi_line_equal,
assert_not_almost_equal,
assert_not_almost_equals,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_raises_regexp,
assert_regexp_matches,
assert_true,
assert_tuple_equal,
)
import numpy as np
import pandas as pd
from pandas.util.testing import (
assert_frame_equal,
assert_panel_equal,
assert_series_equal,
assert_index_equal,
)
from six import iteritems, viewkeys, PY2
from toolz import dissoc, keyfilter
import toolz.curried.operator as op
from zipline.testing.core import ensure_doctest
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.lib.labelarray import LabelArray
from zipline.utils.functional import dzip_exact, instance
from zipline.utils.math_utils import tolerant_equals
@instance
@ensure_doctest
class wildcard(object):
"""An object that compares equal to any other object.
This is useful when using :func:`~zipline.testing.predicates.assert_equal`
with a large recursive structure and some fields to be ignored.
Examples
--------
>>> wildcard == 5
True
>>> wildcard == 'ayy'
True
# reflected
>>> 5 == wildcard
True
>>> 'ayy' == wildcard
True
"""
@staticmethod
def __eq__(other):
return True
@staticmethod
def __ne__(other):
return False
def __repr__(self):
return '<%s>' % type(self).__name__
__str__ = __repr__
def keywords(func):
"""Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
Notes
-----
Taken from odo.utils
"""
if isinstance(func, type):
return keywords(func.__init__)
elif isinstance(func, partial):
return keywords(func.func)
return inspect.getargspec(func).args
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
Notes
-----
Taken from odo.utils
"""
return keyfilter(op.contains(keywords(f)), kwargs)
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
"""
return word + (suffix if len(seq) != 1 else '')
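# e.g. _s('key', [1, 2]) -> 'keys' while _s('key', [1]) -> 'key'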
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ''
return 'path: _' + ''.join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
    Returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ''
return msg + '\n'
def _safe_cls_name(cls):
try:
return cls.__name__
except AttributeError:
return repr(cls)
def assert_is_subclass(subcls, cls, msg=''):
"""Assert that ``subcls`` is a subclass of ``cls``.
Parameters
----------
subcls : type
The type to check.
cls : type
The type to check ``subcls`` against.
msg : str, optional
An extra assertion message to print if this fails.
"""
assert issubclass(subcls, cls), (
'%s is not a subclass of %s\n%s' % (
_safe_cls_name(subcls),
_safe_cls_name(cls),
msg,
)
)
def assert_regex(result, expected, msg=''):
"""Assert that ``expected`` matches the result.
Parameters
----------
result : str
The string to search.
expected : str or compiled regex
The pattern to search for in ``result``.
msg : str, optional
An extra assertion message to print if this fails.
"""
assert re.search(expected, result), (
'%s%r not found in %r' % (_fmt_msg(msg), expected, result)
)
@contextmanager
def assert_raises_regex(exc, pattern, msg=''):
"""Assert that some exception is raised in a context and that the message
matches some pattern.
Parameters
----------
exc : type or tuple[type]
The exception type or types to expect.
pattern : str or compiled regex
The pattern to search for in the str of the raised exception.
msg : str, optional
An extra assertion message to print if this fails.
"""
try:
yield
except exc as e:
assert re.search(pattern, str(e)), (
'%s%r not found in %r' % (_fmt_msg(msg), pattern, str(e))
)
else:
raise AssertionError('%s%s was not raised' % (_fmt_msg(msg), exc))
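# Editor's illustration, not part of the original zipline module: a minimal sketch of
# how assert_raises_regex might be used. The ValueError and its message below are
# hypothetical, chosen only for the demonstration.
def _demo_assert_raises_regex():
    with assert_raises_regex(ValueError, r'expected \d+ columns'):
        raise ValueError('expected 3 columns, got 1')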
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg='', **kwargs):
"""Assert that two objects are equal using the ``==`` operator.
Parameters
----------
result : object
The result that came from the function under test.
expected : object
The expected result.
Raises
------
AssertionError
Raised when ``result`` is not equal to ``expected``.
"""
assert result == expected, '%s%s != %s\n%s' % (
_fmt_msg(msg),
result,
expected,
_fmt_path(path),
)
@assert_equal.register(float, float)
def assert_float_equal(result,
expected,
path=(),
msg='',
float_rtol=10e-7,
float_atol=10e-7,
float_equal_nan=True,
**kwargs):
assert tolerant_equals(
result,
expected,
rtol=float_rtol,
atol=float_atol,
equal_nan=float_equal_nan,
), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % (
_fmt_msg(msg),
result,
expected,
float_rtol,
float_atol,
(' (with nan != nan)' if not float_equal_nan else ''),
_fmt_path(path),
)
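# Editor's illustration, not part of the original zipline module: the float branch of
# assert_equal honours float_rtol/float_atol and treats NaN as equal to NaN by
# default. The numeric values are arbitrary demonstration data.
def _demo_assert_float_equal():
    assert_equal(1.0, 1.0 + 5e-8)              # within the default tolerances
    assert_equal(float('nan'), float('nan'))   # passes because float_equal_nan=True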
def _check_sets(result, expected, msg, path, type_):
"""Compare two sets. This is used to check dictionary keys and sets.
Parameters
----------
result : set
expected : set
msg : str
path : tuple
    type_ : str
The type of an element. For dict we use ``'key'`` and for set we use
``'element'``.
"""
if result != expected:
if result > expected:
diff = result - expected
msg = 'extra %s in result: %r' % (_s(type_, diff), diff)
elif result < expected:
diff = expected - result
msg = 'result is missing %s: %r' % (_s(type_, diff), diff)
else:
in_result = result - expected
in_expected = expected - result
msg = '%s only in result: %s\n%s only in expected: %s' % (
_s(type_, in_result),
in_result,
_s(type_, in_expected),
in_expected,
)
raise AssertionError(
'%s%ss do not match\n%s' % (
_fmt_msg(msg),
type_,
_fmt_path(path),
),
)
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
viewkeys(result),
viewkeys(expected),
msg,
path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),),
'key',
)
failures = []
for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
try:
assert_equal(
resultv,
expectedv,
path=path + ('[%r]' % (k,),),
msg=msg,
**kwargs
)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError('\n'.join(failures))
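# Editor's illustration, not part of the original zipline module: a nested mismatch is
# reported with a path such as "path: _['a'][1]". The dictionaries are hypothetical
# demonstration data; the helper only returns the formatted failure message.
def _demo_assert_dict_equal():
    try:
        assert_equal({'a': [1, 2]}, {'a': [1, 3]})
    except AssertionError as e:
        return str(e)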
@assert_equal.register(list, list)
@assert_equal.register(tuple, tuple)
def assert_sequence_equal(result, expected, path=(), msg='', **kwargs):
result_len = len(result)
expected_len = len(expected)
assert result_len == expected_len, (
'%s%s lengths do not match: %d != %d\n%s' % (
_fmt_msg(msg),
type(result).__name__,
result_len,
expected_len,
_fmt_path(path),
)
)
for n, (resultv, expectedv) in enumerate(zip(result, expected)):
assert_equal(
resultv,
expectedv,
path=path + ('[%d]' % n,),
msg=msg,
**kwargs
)
@assert_equal.register(set, set)
def assert_set_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
result,
expected,
msg,
path,
'element',
)
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(result,
expected,
path=(),
msg='',
array_verbose=True,
array_decimal=None,
**kwargs):
f = (
np.testing.assert_array_equal
if array_decimal is None else
partial(np.testing.assert_array_almost_equal, decimal=array_decimal)
)
try:
f(
result,
expected,
verbose=array_verbose,
err_msg=msg,
)
except AssertionError as e:
raise AssertionError('\n'.join((str(e), _fmt_path(path))))
@assert_equal.register(LabelArray, LabelArray)
def assert_labelarray_equal(result, expected, path=(), **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
**kwargs
)
assert_equal(
result.as_int_array(),
expected.as_int_array(),
path=path + ('.as_int_array()',),
**kwargs
)
def _register_assert_equal_wrapper(type_, assert_eq):
"""Register a new check for an ndframe object.
Parameters
----------
type_ : type
The class to register an ``assert_equal`` dispatch for.
assert_eq : callable[type_, type_]
The function which checks that if the two ndframes are equal.
Returns
-------
assert_ndframe_equal : callable[type_, type_]
The wrapped function registered with ``assert_equal``.
"""
@assert_equal.register(type_, type_)
def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
try:
assert_eq(
result,
expected,
**filter_kwargs(assert_eq, kwargs)
)
except AssertionError as e:
raise AssertionError(
_fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
)
return assert_ndframe_equal
assert_frame_equal = _register_assert_equal_wrapper(
pd.DataFrame,
assert_frame_equal,
)
assert_panel_equal = _register_assert_equal_wrapper(
pd.Panel,
assert_panel_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
pd.Series,
assert_series_equal,
)
assert_index_equal = _register_assert_equal_wrapper(
pd.Index,
assert_index_equal,
)
@assert_equal.register(pd.Categorical, pd.Categorical)
def assert_categorical_equal(result, expected, path=(), msg='', **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
msg=msg,
**kwargs
)
assert_equal(
result.codes,
expected.codes,
path=path + ('.codes',),
msg=msg,
**kwargs
)
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'):
assert_equal(
getattr(result, attr),
getattr(expected, attr),
path=path + ('.' + attr,),
**kwargs
)
@assert_equal.register(
(datetime.datetime, np.datetime64),
(datetime.datetime, np.datetime64),
)
def assert_timestamp_and_datetime_equal(result,
expected,
path=(),
msg='',
allow_datetime_coercions=False,
compare_nat_equal=True,
**kwargs):
"""
Branch for comparing python datetime (which includes pandas Timestamp) and
np.datetime64 as equal.
    Raises an AssertionError unless ``allow_datetime_coercions`` is passed as True.
"""
assert allow_datetime_coercions or type(result) == type(expected), (
"%sdatetime types (%s, %s) don't match and "
"allow_datetime_coercions was not set.\n%s" % (
_fmt_msg(msg),
type(result),
type(expected),
_fmt_path(path),
)
)
result = pd.Timestamp(result)
    expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
assert_equal.dispatch(object, object)(
result,
expected,
path=path,
**kwargs
)
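# Editor's illustration, not part of the original zipline module: with
# allow_datetime_coercions=True a numpy datetime64 and a pandas Timestamp for the same
# instant compare equal. The date used is arbitrary demonstration data.
def _demo_datetime_coercion():
    assert_equal(
        np.datetime64('2016-01-01'),
        pd.Timestamp('2016-01-01'),
        allow_datetime_coercions=True,
    )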
@assert_equal.register(slice, slice)
def assert_slice_equal(result, expected, path=(), msg=''):
diff_start = (
        ('starts are not equal: %s != %s' % (result.start, expected.start))
if result.start != expected.start else
''
)
diff_stop = (
        ('stops are not equal: %s != %s' % (result.stop, expected.stop))
if result.stop != expected.stop else
''
)
diff_step = (
        ('steps are not equal: %s != %s' % (result.step, expected.step))
if result.step != expected.step else
''
)
diffs = diff_start, diff_stop, diff_step
assert not any(diffs), '%s%s\n%s' % (
_fmt_msg(msg),
'\n'.join(filter(None, diffs)),
_fmt_path(path),
)
def assert_isidentical(result, expected, msg=''):
assert result.isidentical(expected), (
'%s%s is not identical to %s' % (_fmt_msg(msg), result, expected)
)
try:
# pull the dshape cases in
from datashape.util.testing import assert_dshape_equal
except ImportError:
pass
else:
assert_equal.funcs.update(
dissoc(assert_dshape_equal.funcs, (object, object)),
)
| apache-2.0 |
pnedunuri/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
harveywwu/vnpy | vnpy/trader/gateway/tkproGateway/DataApi/utils.py | 4 | 3883 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from collections import namedtuple
import datetime as dt
import pandas as pd
import numpy as np
long_nan = 9223372036854775807
def is_long_nan(v):
if v == long_nan:
return True
else:
return False
def to_nan(x):
if is_long_nan(x):
return np.nan
else:
return x
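# Editor's illustration, not part of the original module: long_nan (the largest signed
# 64-bit integer) appears to be used here as a missing-value sentinel for integer
# columns; to_nan maps it to np.nan. The literal 42 is arbitrary demonstration data.
def _demo_to_nan():
    assert is_long_nan(long_nan)
    assert np.isnan(to_nan(long_nan))
    assert to_nan(42) == 42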
def _to_date(row):
date = int(row['DATE'])
return pd.datetime(year=date // 10000, month=date // 100 % 100, day=date % 100)
def _to_datetime(row):
date = int(row['DATE'])
time = int(row['TIME']) // 1000
    return pd.datetime(year=date // 10000, month=date // 100 % 100, day=date % 100,
hour=time // 10000, minute=time // 100 % 100, second=time % 100)
def _to_dataframe(cloumset, index_func=None, index_column=None):
df = pd.DataFrame(cloumset)
for col in df.columns:
if df.dtypes.loc[col] == np.int64:
df.loc[:, col] = df.loc[:, col].apply(to_nan)
if index_func:
df.index = df.apply(index_func, axis=1)
elif index_column:
df.index = df[index_column]
del df.index.name
return df
def _error_to_str(error):
if error:
if 'message' in error:
return str(error['error']) + "," + error['message']
else:
return str(error['error']) + ","
else:
return ","
def to_obj(class_name, data):
try:
if type(data) == list or type(data) == tuple:
result = []
for d in data:
result.append(namedtuple(class_name, list(d.keys()))(*list(d.values())))
return result
elif type(data) == dict:
result = namedtuple(class_name, list(data.keys()))(*list(data.values()))
return result
else:
return data
except Exception as e:
print(class_name, data, e)
return data
def to_date_int(date):
if isinstance(date, str):
t = dt.datetime.strptime(date, "%Y-%m-%d")
date_int = t.year * 10000 + t.month * 100 + t.day
return date_int
elif isinstance(date, (int, np.integer)):
return date
else:
return -1
def to_time_int(time):
if isinstance(time, str):
t = dt.datetime.strptime(time, "%H:%M:%S")
time_int = t.hour * 10000 + t.minute * 100 + t.second
return time_int
elif isinstance(time, (int, np.integer)):
return time
else:
return -1
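# Editor's illustration, not part of the original module: the converters accept either
# strings or integers and return the compact integer encoding. The dates and times are
# arbitrary demonstration data.
def _demo_date_time_int():
    assert to_date_int('2017-06-05') == 20170605
    assert to_date_int(20170605) == 20170605
    assert to_time_int('09:30:00') == 93000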
def extract_result(cr, data_format="", index_column=None, class_name=""):
"""
format supports pandas, obj.
"""
err = _error_to_str(cr['error']) if 'error' in cr else None
if 'result' in cr:
if data_format == "pandas":
if index_column:
return (_to_dataframe(cr['result'], None, index_column), err)
# if 'TIME' in cr['result']:
# return (_to_dataframe(cr['result'], _to_datetime), err)
# elif 'DATE' in cr['result']:
# return (_to_dataframe(cr['result'], _to_date), err)
else:
return (_to_dataframe(cr['result']), err)
elif data_format == "obj" and cr['result'] and class_name:
r = cr['result']
if type(r) == list or type(r) == tuple:
result = []
for d in r:
result.append(namedtuple(class_name, list(d.keys()))(*list(d.values())))
elif type(r) == dict:
result = namedtuple(class_name, list(r.keys()))(*list(r.values()))
else:
result = r
return (result, err)
else:
return (cr['result'], err)
else:
return (None, err)
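# Editor's illustration, not part of the original module: a minimal call with a
# hand-built response dict. The 'error'/'result' keys mirror what extract_result
# already expects; the symbol and price values are hypothetical demonstration data.
def _demo_extract_result():
    cr = {'result': [{'symbol': '000001.SH', 'price': 10.5}], 'error': None}
    df, err = extract_result(cr, data_format='pandas')
    return df, err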
| mit |
nlpaueb/aueb.twitter.sentiment | regularization.py | 1 | 1421 | from sklearn import preprocessing
import numpy as np
import math
# regularize features to [-1, 1], column-wise: xi = (xi - mean) / (3 * variance)
def regularize(features):
#regularize per column
for i in range(0,len(features[0])):
try:
            # take every column
feat=features[:,i]
#mean and variance of every column
mean=np.mean(feat)
var=np.var(feat)
if(var!=0):
features[:,i]=(features[:,i]-mean)/float(3*var)
else :
features[:,i]=0
except:
pass
features[features>1]=1
features[features<-1]=-1
return features
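# Editor's illustration, not part of the original module: column-wise regularization of
# a small random feature matrix; the shape (10 samples, 4 features) is arbitrary
# demonstration data.
def _demo_regularize():
    features = np.random.rand(10, 4)
    scaled = regularize(features.copy())
    return scaled.min() >= -1 and scaled.max() <= 1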
# regularize features to [-1, 1] row-wise (horizontally): yi = yi / norm(yi, 2)
def regularizeHorizontally(features):
for i in range(0,features.shape[0]):
if (features[i] == np.zeros(features[i].shape)).all() == True:
pass
else:
features[i] = features[i]/np.linalg.norm(features[i],ord=2)
features[features>1]=1
features[features<-1]=-1
return features
# min-max scaling: xi = (xi - xmin) / (xmax - xmin)
def regularizeMaxMin(features):
#regularize per column
for i in range(0,len(features[0])):
        # take every column
feat=features[:,i]
#max and min value of every feature
xmax=max(feat)
xmin=min(feat)
if((xmax-xmin)!=0):
features[:,i]=(features[:,i]-xmin)/float(xmax-xmin)
else :
features[:,i]=0
return features
| gpl-3.0 |
pseudocubic/neutronpy | neutronpy/data/plot.py | 1 | 14569 | import numpy as np
from ..lsfit import Fitter
from ..lsfit.tools import convert_params
class PlotData(object):
"""Class containing data plotting methods
Methods
-------
plot
plot_line
plot_contour
plot_volume
"""
def plot(self, x=None, y=None, z=None, w=None, show_err=True, to_bin=None,
plot_options=None, fit_options=None, smooth_options=None,
output_file='', show_plot=True, **kwargs):
r"""Plots the data in the class. x and y must at least be specified,
and z and/or w being specified will produce higher dimensional plots
(contour and volume, respectively).
Parameters
----------
x : str, optional
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str, optional
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
z : str, optional
`data_column` key defining the z-axis.
Default: None
w : str, optional
`data_column` key defining the w-axis.
Default: None
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
            Plot options to be passed to the matplotlib plotting routine.
            Default: None
        fit_options : dict, optional
            Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
if to_bin is None:
to_bin = dict()
if plot_options is None:
plot_options = dict()
if fit_options is None:
fit_options = dict()
if smooth_options is None:
smooth_options = dict(sigma=0)
if x is None:
try:
x = self.plot_default_x
except AttributeError:
raise
if y is None:
try:
y = self.plot_default_y
except AttributeError:
raise
if w is not None:
self.plot_volume(x, y, z, w, to_bin, plot_options, smooth_options,
output_file, show_plot, **kwargs)
elif w is None and z is not None:
self.plot_contour(x, y, z, to_bin, plot_options, smooth_options,
output_file, show_plot, **kwargs)
elif w is None and z is None:
self.plot_line(x, y, show_err, to_bin, plot_options, fit_options,
smooth_options, output_file, show_plot, **kwargs)
def plot_volume(self, x, y, z, w, to_bin=None, plot_options=None, smooth_options=None, output_file='', show_plot=True, **kwargs):
r"""Plots a 3D volume of 4D data
Parameters
----------
x : str
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
z : str
`data_column` key defining the z-axis.
Default: None
w : str
`data_column` key defining the w-axis.
Default: None
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
            Plot options to be passed to the matplotlib plotting routine.
            Default: None
        fit_options : dict, optional
            Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
try:
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
raise ImportError('Matplotlib >= 1.3.0 is necessary for plotting.')
if to_bin:
data_bin = self.bin(to_bin)
_x = data_bin.data[x]
_y = data_bin.data[y]
_z = data_bin.data[z]
if w == 'intensity':
_w = data_bin.intensity
else:
_w = data_bin.data[w]
else:
_x = self.data[x]
_y = self.data[y]
_z = self.data[z]
if w == 'intensity':
_w = self.intensity
else:
_w = self.data[w]
if smooth_options['sigma'] > 0:
from scipy.ndimage.filters import gaussian_filter
_w = gaussian_filter(_w, **smooth_options)
_x, _y, _z, _w = (np.ma.masked_where(_w <= 0, _x),
np.ma.masked_where(_w <= 0, _y),
np.ma.masked_where(_w <= 0, _z),
np.ma.masked_where(_w <= 0, _w))
fig = plt.figure()
axis = fig.add_subplot(111, projection='3d')
axis.scatter(_x, _y, _z, c=_w, linewidths=0, vmin=1.e-4,
vmax=0.1, norm=colors.LogNorm())
if output_file:
plt.savefig(output_file)
elif show_plot:
plt.show()
else:
pass
def plot_contour(self, x, y, z, to_bin=None, plot_options=None, smooth_options=None, output_file='', show_plot=True, **kwargs):
r"""Method for plotting a 2D contour plot of 3D data
Parameters
----------
x : str
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
z : str
`data_column` key defining the z-axis.
Default: None
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
            Plot options to be passed to the matplotlib plotting routine.
            Default: None
        fit_options : dict, optional
            Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise
if to_bin:
data_bin = self.bin(to_bin)
_x = data_bin.data[x]
_y = data_bin.data[y]
if z == 'intensity':
_z = data_bin.intensity
else:
_z = data_bin.data[z]
else:
_x = self.data[x]
_y = self.data[y]
if z == 'intensity':
_z = self.intensity
else:
_z = self.data[z]
if smooth_options['sigma'] > 0:
from scipy.ndimage.filters import gaussian_filter
_z = gaussian_filter(_z, **smooth_options)
x_step = np.around(
np.abs(np.unique(_x) - np.roll(np.unique(_x), 1))[1], decimals=4)
y_step = np.around(
np.abs(np.unique(_y) - np.roll(np.unique(_y), 1))[1], decimals=4)
        # np.linspace expects an integer number of points
        x_sparse = np.linspace(
            _x.min(), _x.max(), int(round((_x.max() - _x.min()) / x_step)) + 1)
        y_sparse = np.linspace(
            _y.min(), _y.max(), int(round((_y.max() - _y.min()) / y_step)) + 1)
X, Y = np.meshgrid(x_sparse, y_sparse)
from scipy.interpolate import griddata
Z = griddata((_x, _y), _z, (X, Y))
plt.pcolormesh(X, Y, Z, **plot_options)
if output_file:
plt.savefig(output_file)
elif show_plot:
plt.show()
else:
pass
def plot_line(self, x, y, show_err=True, to_bin=None, plot_options=None, fit_options=None, smooth_options=None, output_file='', show_plot=True, **kwargs):
r"""Method to Plot a line of 2D data
Parameters
----------
x : str
`data_column` key defining the x-axis.
Default: :py:attr:`plot_default_x`.
y : str
`data_column` key defining the y-axis.
Default: :py:attr:`plot_default_y`.
        to_bin : dict, optional
If set, data will be rebinned to the specified parameters, in the
format `[min, max, num points]` for each `data_column` key. See
documentation for :py:meth:`.Data.bin`. Default: None
show_err : bool, optional
Plot error bars. Only applies to xy scatter plots. Default: True
show_plot : bool, optional
Execute `plt.show()` to show the plot. Incompatible with
`output_file` param. Default: True
output_file : str, optional
If set, the plot will be saved to the location given, in the format
specified, provided that the format is supported. Default: None
plot_options : dict, optional
            Plot options to be passed to the matplotlib plotting routine.
            Default: None
        fit_options : dict, optional
            Fitting options to be passed to the Fitter routine. Default: None
        smooth_options : dict, optional
Smoothing options for Gaussian smoothing from
`scipy.ndimage.filters.gaussian_filter`. Default: None
kwargs : optional
Additional plotting keyword arguments passed to the plotting
function.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise
if to_bin:
data_bin = self.bin(to_bin)
_x = data_bin.data[x]
if y == 'intensity':
_y = data_bin.intensity
_err = data_bin.error
else:
_y = data_bin.data[y]
_err = np.sqrt(data_bin.data[y])
else:
_x = self.data[x]
if y == 'intensity':
_y = self.intensity
_err = self.error
else:
_y = self.data[y]
_err = np.sqrt(self.data[y])
if smooth_options['sigma'] > 0:
from scipy.ndimage.filters import gaussian_filter
_y = gaussian_filter(_y, **smooth_options)
if not plot_options:
plot_options['fmt'] = 'rs'
if show_err:
plt.errorbar(_x, _y, yerr=_err, **plot_options)
else:
plt.errorbar(_x, _y, **plot_options)
# add axis labels
plt.xlabel(x)
plt.ylabel(y)
if fit_options:
def residuals(params, data):
funct, x, y, err = data
return (y - funct(params, x)) / err
fitobj = Fitter(residuals, data=(
fit_options['function'], _x, _y, _err))
if 'fixp' in fit_options:
fitobj.parinfo = [{'fixed': fix}
for fix in fit_options['fixp']]
try:
fitobj.fit(params0=fit_options['p'])
fit_x = np.linspace(min(_x), max(_x), len(_x) * 10)
fit_y = fit_options['function'](fitobj.params, fit_x)
plt.plot(fit_x, fit_y, '{0}-'.format(plot_options['fmt'][0]))
param_string = u'\n'.join(['p$_{{{0:d}}}$: {1:.3f}'.format(i, p)
for i, p in enumerate(fitobj.params)])
                chi2_params = u'$\\chi^2$: {0:.3f}\n\n'.format(
fitobj.chi2_min) + param_string
plt.annotate(chi2_params, xy=(0.05, 0.95), xycoords='axes fraction',
horizontalalignment='left', verticalalignment='top',
bbox=dict(alpha=0.75, facecolor='white', edgecolor='none'))
except Exception as mes: # pylint: disable=broad-except
raise Exception("Something wrong with fit: {0}".format(mes))
if output_file:
plt.savefig(output_file)
elif show_plot:
plt.show()
else:
pass
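# Editor's illustration, not part of the original module: PlotData is a mixin consumed
# by neutronpy's Data class, so plotting normally goes through a Data instance. The
# column name 'e' and the 'intensity' key below are assumptions about the loaded data,
# not guarantees of this API.
def _demo_plot_line(data):
    # `data` is assumed to be a neutronpy Data object with an 'e' data column
    data.plot(x='e', y='intensity', show_err=True, show_plot=False)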
| mit |
mehdidc/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 5 | 11439 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
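# Editor's illustration, not part of the original scikit-learn module: restricting a
# fitted vectorizer to a boolean support mask such as a feature selector's
# get_support() output. The toy dictionaries are demonstration data only.
def _demo_restrict():
    v = DictVectorizer(sparse=False)
    v.fit_transform([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
    v.restrict(np.array([True, False, True]))  # keep 'bar' and 'foo'
    return v.get_feature_names()               # ['bar', 'foo']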
| bsd-3-clause |