repo_name (stringlengths 7-90) | path (stringlengths 4-191) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 762-838k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
edarin/population_simulator | src/generation.py | 1 | 2170 | # -*- coding: utf-8 -*-
"""
Handles generating a population from inputs
that describe the population.
- The input (the margins, "marges" in the code) is a
pandas Series.
Its index lists the columns that will be imputed.
"""
from numpy.testing import assert_almost_equal
from pandas import Series, DataFrame
def check_input_format(marges):
''' runs sanity checks on the format used for
generating the table
'''
assert isinstance(marges, Series)
assert all(0 <= marges)
assert all(marges <= 1)
assert_almost_equal(marges.sum(), 1)
return True
def generate_population(marges, size):
'''
returns a population with the characteristics
recorded in the marges object.
- marges: the margins Series
- size is the number of rows of the
generated population.
Important note:
The size actually obtained is not exactly size.
We assume that size is only there to roughly
control the computation time; the exact
size does not matter much.
'''
check_input_format(marges)
size_group = marges*size
size_group = size_group.round().astype(int)
def _check_size(vecteur, taille):
# TODO: should be an option
return abs(vecteur.sum()/taille - 1) < 0.05
def resize(marges, size):
# TODO: make this a recursive call?
# TODO: an implementation can probably be found online
pass
if not _check_size(size_group, size):
resize(marges, size)
nb_to_generate = size_group[size_group > 0]
# population_values = nb_to_generate.values.repeat(nb_to_generate).index
population = nb_to_generate.repeat(nb_to_generate)
population = population.reset_index().iloc[:,:-1]
return population
if __name__ == '__main__':
import pandas as pd
reference = pd.read_csv("data/demographie/pop_age_sexe_2016.csv")
del reference['total']
marges = reference.set_index('age_revolu').unstack()
marges.index.names = ['sexe', 'age']
marges /= marges.sum()
generate_population(marges, 1000) | agpl-3.0 |
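A minimal usage sketch for the helpers above, assuming generate_population from this module is in scope; the made-up two-level margins Series stands in for the demographic CSV used in the __main__ block, and the index names and category labels are purely illustrative:

import pandas as pd

# Hypothetical margins: population shares per (sexe, age) cell, summing to 1
marges = pd.Series(
    [0.30, 0.20, 0.25, 0.25],
    index=pd.MultiIndex.from_tuples(
        [("F", "0-20"), ("F", "21+"), ("M", "0-20"), ("M", "21+")],
        names=["sexe", "age"],
    ),
)

population = generate_population(marges, size=200)
# One row per simulated individual, with 'sexe' and 'age' columns
print(population.shape)
print(population["sexe"].value_counts(normalize=True))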
JosmanPS/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
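As an aside, the first expected cost above (850) can be cross-checked against SciPy's public solver; this snippet is illustrative only and is not part of the scikit-learn test suite:

import numpy as np
from scipy.optimize import linear_sum_assignment

# Same square cost matrix as the first test case above
cost = np.array([[400, 150, 400],
                 [400, 450, 600],
                 [300, 225, 300]])
row_ind, col_ind = linear_sum_assignment(cost)
print(cost[row_ind, col_ind].sum())  # 850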
kenners/odc | odc.py | 1 | 11267 | """
odc.py
An implementation of the Siggaard-Andersen TANH oxyhaemoglobin dissociation curve model in Python.
Siggaard-Andersen O, Siggaard-Andersen M, Fogh-Andersen N. The TANH-equation modified for the hemoglobin, oxygen, and carbon monoxide equilibrium. Scand J Clin Lab Invest Suppl 1993;214:113–9. doi: 10.1080/00365519309090687
http://dx.doi.org/10.1080/00365519309090687
"""
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
import scipy.optimize
class ODC(object):
"""
An oxyhaemoglobin dissociation curve model.
"""
def __init__(self, pO2, sO2, T=37.0, pH=7.40, pCO2=5.33, cDPG=5.00, ctHb=15.0, FCOHb=0, FMetHb=0, FHbF=0):
#TODO: Set COHb, MetHb, HbF to 0 rather than 0.005
# Constants
self.p0 = 7 # kPa
self.s0 = 0.867 # Fraction
self.h0 = 3.5 #
self.T0 = 37.0 # °C
self.k0 = 0.5342857 #
self.y0 = 1.8747 #
self.pH0 = 7.40 #
self.cDPG0 = 5.0 # mmol/L
self.pCO20 = 5.33 # kPa
self.p050 = 3.578 # kPa - Estimated normal p50 in human adults
self.f0 = 1.121 #
# Chemical allosteric affinity constants
self.a10 = -0.88 # pH allosteric affinity coefficient
self.a20 = 0.048 # pCO2 allosteric affinity coefficient
self.a30 = -0.7 # MetHb allosteric affinity coefficient
self.a40 = -0.25 # HbF allosteric affinity coefficient
self.a500 = 0.06
self.a501 = -0.02
self.a60 = 0.06 # cDPG allosteric affinity coefficient
self.a70 = -0.02
self.dbdT = 0.055
self.dadcDPG0 = 0.3
self.dadcDPGxHbF = -0.1
# Input variables
self.pO2 = pO2
self.sO2 = sO2
self.T = T
self.pH = pH
self.pCO2 = pCO2
self.cDPG = cDPG
self.FCOHb = FCOHb
self.FMetHb = FMetHb
self.FHbF = FHbF
self.ctHb = ctHb
# Accuracy of any iterative calculations
self.epsilon = 1e-6
# y = y0 + x - x0 + (h * tanh(k0 * (x - x0)))
def __str__(self):
# TODO
pass
@property
def p(self):
"""
pO2CO
Combined partial pressure of oxygen and carbon monoxide
"""
return self.pO2 * self.s / (self.s - self.sCO)
@property
def s(self):
"""
sO2CO
Combined oxygen/carbon monoxide saturation of haemoglobin.
"""
return self.sO2 + ((1 - self.sO2) * self.sCO)
@property
def h(self):
"""
Positive, homotropic, allosteric ligand interaction factor.
Varies with changes in affinity
"""
return self.h0 + self.a
@property
def x0(self):
"""
Combined allosteric affinity factor.
Sum of chemical and thermal affinity.
"""
return self.a + self.b
@property
def a1(self):
"""pH allosteric affinity factor"""
return self.a10 * (self.pH - self.pH0)
@property
def a2(self):
"""pCO2 allosteric affinity factor"""
return self.a20 * math.log(self.pCO2 / self.pCO20)
@property
def a3(self):
"""MetHb allosteric affinity factor"""
return self.a30 * self.FMetHb
@property
def a4(self):
"""HbF allosteric affinity factor"""
return self.a40 * self.FHbF
@property
def a5(self):
"""DPG allosteric affinity factor"""
return (self.a500 + (self.a501 * self.FHbF)) * (self.cDPG - self.cDPG0)
def calculate_a(self, pH, pCO2, FMetHb, FHbF, cDPG):
a1 = self.a10 * (pH - self.pH0)
a2 = self.a20 * math.log(pCO2 / self.pCO20)
a3 = self.a30 * FMetHb
a4 = self.a40 * FHbF
a5 = (self.a500 + (self.a501 * FHbF)) * (cDPG - self.cDPG0)
return a1 + a2 + a3 + a4 + a5
@property
def a(self):
"""
Chemical allosteric affinity factor.
Sum of factors for pH, pCO2, COHb, MetHb, and HbF
"""
return self.a1 + self.a2 + self.a3 + self.a4 + self.a5
@property
def a_lam(self):
# TODO: ?remove
"""
Chemical allosteric affinity factor.
Calculated from p, s, and T using LambertW function.
"""
return (-self.k0 * (self.b + self.h0 - self.x + self.y - self.y0) + scipy.special.lambertw(self.k0 * (-self.b + self.h0 + self.x - self.y + self.y0) * math.exp(self.k0 * (-self.b + self.h0 + self.x + self.y - self.y0)))) / (2 * self.k0)
@property
def b(self):
"""Thermal allosteric affinity factor"""
return self.dbdT * (self.T - self.T0)
@property
def ceHb(self):
"""Concentration of effective haemoglobin"""
return self.ctHb * (1 - self.FCOHb - self.FMetHb)
@property
def sCO(self):
"""Saturation of haemoglobin with carbon monoxide"""
return self.FCOHb / (1 - self.FMetHb)
@property
def y(self):
return math.log(self.s / (1 - self.s))
@property
def x(self):
return math.log(self.p / self.p0)
def calc_x(self, p, a, T):
# From OSA pascal
"""Calculates x for p, a, and T"""
return math.log(p / self.p0) - a - (self.dbdT * (T - self.T0))
def calc_y(self, p, a, T):
# From OSA pascal
"""Calculates y for p, a, and T"""
x = self.calc_x(p, a, T)
h = self.calc_h(a)
return self.y0 + x + (h * math.tanh(self.k0 * x))
def calc_h(self, a):
"""Calculates h for a"""
return self.h0 + a
def calc_dydx(self, p, a, T):
# From OSA pascal
return 1 + self.calc_h(a) * self.k0 * (1 - (math.tanh(self.k0 * self.calc_x(p, a, T)))**2)
def calc_dyda(self, p, a, T):
# From OSA pascal
return math.tanh(self.k0 * self.calc_x(p, a, T)) - self.calc_dydx(p, a, T)
@property
def a_est(self):
# From 1990 model
"""
Estimates 'a' from sO2 and pO2 using IFCC 1990 Guidelines.
Assumes no dyshaemoglobins (MetHb, COHb, HbF).
Inaccurate if sO2 > 0.97.
"""
if self.sO2 > 0.97:
raise ValueError('sO2 > 0.97')
x = math.log(self.pO2 / self.p0)
y = math.log(self.sO2 / (1 - self.sO2)) - self.y0
t = math.tanh(self.k0 * x)
return (y - x - (self.h0 * t)) * ((1.87 * t**2) + t - 2.87)**-1
@property
def p50_est(self):
# From 1990 model
"""
Estimate of p50 from sO2 and pO2 using IFCC 1990 Guidelines.
Assumes no dyshaemoglobins (MetHb, COHb, HbF).
Inaccurate if sO2 > 0.97.
"""
return self.p050 * math.exp(self.f0 * self.a_est)
@property
def cDPG_est(self):
# From OSA pascal
"""Estimates cDPG from other variables"""
aDPG0 = self.calculate_a(self.pH, self.pCO2, self.FMetHb, self.FHbF, self.cDPG0)
a = aDPG0
sO2CO = self.s
pO2CO = self.p
ym = self._logit(sO2CO)
yc = self.calc_y(pO2CO, a, self.T)
while (abs(ym - yc) > self.epsilon) or (a < -self.h0):
yc = self.calc_y(pO2CO, a, self.T)
if abs(ym - yc) > 2:
a = a + (0.5 * (ym - yc) / self.calc_dyda(pO2CO, a, self.T))
else:
a = a + (ym - yc) / self.calc_dyda(pO2CO, a, self.T)
if a < -self.h0:
raise ValueError('Unable to calculate cDPG')
#return self.cDPG0
else:
return self.cDPG0 * (1 + ((a - aDPG0) / (self.dadcDPG0 + (self.dadcDPGxHbF * self.FHbF))))
@property
def a_iter(self):
"""Calculates 'a' using an iterative approach"""
# From 1993 tanh paper
start = self.a # Temporary guess with DPG = 5
return scipy.optimize.newton(lambda a: self.y0 - self.y + self.x - (a + self.b) + (self.h * math.tanh(self.k0 * (self.x - (a + self.b)))), start)
def _tanh(self, y0, y, x, a, b, h, k0):
return y0 - y + x - (a + b) + (h * math.tanh(k0 * (x - (a + b))))
@property
def cDPG_from_a_iter(self):
"""Calculates cDPG from iterative value of a"""
return self.a_iter - self.a1 - self.a2 - self.a3 - self.a4
@property
def p50(self):
"""
p50
"""
return self.calculate_pO2(0.5)
def calculate_pO2(self, sO2):
"""Calculates pO2 from sO2"""
# Requires: sO2, T, FCOHb, FMetHb, FHbF, pH, pCO2, and cDPG.
# Calculate a and b
a = self.calculate_a(self.pH, self.pCO2, self.FMetHb, self.FHbF, self.cDPG_est)
b = self.b
# Calculate the 'measured' sO2CO (s) from sO2, FCOHb and FMetHb
#s_m = self.s
s_m = sO2 + ((1 - sO2) * self.sCO)
# Make a guess of a temporary pO2CO (p) (preferably choose the
# point of symmetry of the TANH function) and calculate a temporary
# sO2CO from the TANH equation
p_temp = self.p0
h = self.calc_h(a)
s_temp = 0
while abs(s_m - s_temp) > self.epsilon:
# Calculate temporary sO2CO from TANH equation
x = math.log(p_temp/self.p0)
y = self.y0 + x - (a + b) + (h * math.tanh(self.k0 * (x - (a + b))))
s_temp = math.exp(y)/(math.exp(y) + 1)
#print(s_temp, s_m - s_temp, p_temp)
# The difference between the temporary sO2CO and the 'measured'
# sO2CO allows the calculation of a new temporary pO2CO using a
# fast Newton-Raphson procedure. Repeat until difference is less
# than a given limit.
s_diff = s_m - s_temp
s_temp2 = s_temp + s_diff
y = math.log(s_temp2 / (1 - s_temp2))
# Calculate new p
top = self.y0 - y + x - (a + b) + (h * math.tanh(self.k0 * (x - (a + b))))
bottom = (h * self.k0 * (-math.tanh(self.k0 * (x - (a + b)))**2 + 1)) + 1
x_temp = x - (top / bottom)
p_temp = self.p0 * math.exp(x_temp)
p = p_temp
# Finally M* pCO is calculated (Eq 7) and subtracted from
# pO2CO to give pO2
MpCO = (p/s_m) * self.sCO
pO2 = p - MpCO
# Round answer to epsilon precision
precision = abs(round(math.log(self.epsilon, 10))) - 1
return round(pO2, precision)
def calculate_sO2(self, pO2):
"""Calculates pO2 from sO2"""
# Requires: sO2, T, FCOHb, FMetHb, FHbF, pH, pCO2, and cDPG.
# TODO
pass
@property
def curve_data(self):
"""Tuple of pO2 and sO2 for the curve"""
sO2 = np.arange(0, 1, 0.01, dtype=np.float64)
pO2 = np.array([self.calculate_pO2(x) for x in sO2])
return pO2, sO2
def plot_curve(self):
"""Matplotlib plot of the oxyhaemoglobin curve"""
plt.plot(self.curve_data[0], self.curve_data[1], 'r-')
plt.plot(self.pO2, self.sO2, 'bx')
plt.xlabel('pO2 (kPa)')
plt.ylabel('sO2')
plt.yticks(np.arange(0,1.1,0.1))
plt.axis(xmax=20)
#plt.legend(loc='best')
plt.grid(True)
return plt
# Helper functions
def _logit(self, x):
return math.log(x / (1 - x))
def _antilogit(self, x):
return math.exp(x) / (1 + math.exp(x))
| mit |
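A short usage sketch for the ODC class above, assuming it has been imported; the blood-gas values are invented for illustration and are not reference data:

# Hypothetical sample: pO2 in kPa, sO2 as a fraction
odc = ODC(pO2=5.0, sO2=0.75, T=37.0, pH=7.35, pCO2=5.5)
print(odc.p50_est)   # p50 estimated from the single (pO2, sO2) point
print(odc.p50)       # p50 from the full TANH model (iterative cDPG estimate)
# odc.plot_curve().show()  # optionally draw the reconstructed curve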
AlCap23/Thesis | Python/Experiments/MIMO/titostudy_extern.py | 1 | 10979 | """
Python program to study the robustness of TITO systems.
Identifies the system, computes the controller and analyses the controller using the state space - transfer function relation.
Computes the singular values.
Use this script from the terminal / console with
python FILENAME.py --file_storage=FOLDERNAME
to store essential information with sacred
"""
# Import the packages
# Import numpy
import numpy as np
# Import pandas
import pandas as pd
# Import linear regression model
from scipy import stats
# Import the Algorithms
import sys
sys.path.append('../../')
import Algorithms as alg
# Import the MOBA Simulator
import MoBASimulator as mb
# Plotting
import pylab as p
# Define an experiment
from sacred import Experiment
###########################################################
########################## MAIN PROGRAM ###################
###########################################################
# Define a Sacred Experiment
ex = Experiment()
###########################################################
########################## CONFIG #########################
###########################################################
@ex.config
def experimental_setup():
# Filename to store in
filename = 'TITOStudy_external_15092017_1.csv'
# Overall sample_size
sample_size = 90
# Max degree
max_deg = 9
# Gain Limits
gain_limits = [1, 10.0]
# Lag Limits
lag_limits = [90,180]
# Delay Limits, if small approx. no delay
delay_limits = [1,10]
# Step size for simulate
dt = 0.01
# Final time for simulation
t_sim = 1500
# Maximum Interaction
H = 0.5*np.eye(2,2)
# Frequency parameter (as dB)
wmin = -5
wmax = 3
dw = 10000
# Special frequencies
w_special = np.array([0.1, 0.5, 1.0])
# Store the results
# System Order, Maximum Sensitivity, corresponding Frequency, MS_w0.1, MS_w0.5, MS_w1, Grad_MS(w0.1...1)
columns = ['Degree','MS_RGA','w_MS_RGA','Grad_RGA','MS_A', 'w_MS_A','Grad_A', 'MS_D','w_MS_D', 'Grad_D']
# Add the special frequencies
for freq in range(0, w_special.shape[0]):
columns.append('w_'+str(w_special[freq])+'_RGA')
columns.append('w_'+str(w_special[freq])+'_A')
columns.append('w_'+str(w_special[freq])+'_D')
# Make empty data frame with zeros
R = pd.DataFrame(data = np.zeros((sample_size, len(columns))), columns = columns)
###########################################################
################## CREATE VARIABLES #######################
###########################################################
# Create the gain
k = np.random.uniform(gain_limits[0], gain_limits[1],(sample_size,2,2))
num = np.zeros_like(k)
# Delay
l = np.random.uniform(delay_limits[0], delay_limits[1], (sample_size,2,2))
# Create random time constants
t = np.random.uniform(lag_limits[0],lag_limits[1],(sample_size,2,2,max_deg))
den = np.zeros((sample_size, 2, 2, max_deg+1))
# Loop over the samples and estimate even distribution over degree
for samples in range(0, sample_size):
# Compute current order, from 1 to ...
degree = int(1.0*samples/sample_size * max_deg) + 1
# Loop over outputs
for outputs in range(0,2):
# Loop over inputs
for inputs in range(0,2):
# Compute the distances between the random time constants
# Sort until current degree
dist = np.sort(t[samples, outputs, inputs, :degree])
# Insert a zero for the first distance
dist = np.insert(dist, [0], 0.0)
# Calculate the distance
dist = np.ediff1d(dist)
# Calculate a stable polynomial, which highest coefficient is normed!!!
den[samples, outputs, inputs, :(degree+1)] = np.polynomial.polynomial.polyfromroots(-1./dist)
# Hence, normalize the gain with the highest coefficient
num[samples, outputs, inputs] = k[samples, outputs, inputs] * den[samples, outputs, inputs, 0]
###########################################################
################## EXPERIMENT #############################
###########################################################
@ex.automain
def experiment(num, den, l, R, filename, sample_size, max_deg, dt, t_sim, H, wmin, wmax, dw, w_special):
# Loop over the samples, compute order like earlier
###########################################################
####################### INITIAL MODEL #####################
###########################################################
# Open Simulator
sim = mb.Simulator()
# Show the log window
sim.showLogWindow()
###########################################################
####################### SAMPLE LOOP #######################
###########################################################
# Set initial degree to zero
degree = 0
for samples in range(0, sample_size):
# Calculate the current degree
c_deg = int(1.0 * samples/sample_size * max_deg) +1
# Check if degree has changed
if degree < c_deg:
# Change degree
degree = c_deg
# Clear Simulator
sim.clear()
# Load new model
sim.loadModel("C:/Users/juliu/Documents/Thesis/Modelica/FMU/2_2_n"+str(degree)+"/Masterthesis_Models_mimo_0processmodel.fmu")
sim.setOperationMode('FMU for ModelExchange')
# Preallocate identification parameters
K = np.zeros((2,2))
T = np.zeros((2,2))
L = np.zeros((2,2))
# Reload the model
sim.reloadModel()
###########################################################
####################### MODEL SETUP #######################
###########################################################
# Create a parameter list
params = {}
# Loop over the systems outputs
for outputs in range(0,2):
# Loop over the systems inputs
for inputs in range(0,2):
# Set system gain
params.update({"fmu.num["+str(outputs+1)+","+str(inputs+1)+",1]": num[samples][outputs][inputs]})
# Set system delay
params.update({"fmu.delay["+str(outputs+1)+","+str(inputs+1)+"]": l.item(samples,outputs,inputs)})
# Loop over denominator coefficients
for order in range(0, degree+1):
params.update({"fmu.den["+str(outputs+1)+","+str(inputs+1)+","+str(degree-order+1)+"]": den[samples][outputs][inputs][(order)]})
# Set the parameter
sim.set(params)
# Show the Parameter
# sim.showParameterDialog()
# Store the state space rep for later use
ss = sim.analyser_getStateSpaceForm()
###########################################################
####################### IDENTIFICATION ####################
###########################################################
# Setup first experiment Input 1 -> Output 1 and Output 2
sim.set({"fmu.u[1]": 1,"fmu.u[2]": 0})
# Simulation of the experiment
res = sim.simulate(dt, t_sim)
# Get the needed signals
y = res["fmu.y[1]"]
y2 = res["fmu.y[2]"]
u = res["fmu.u[1]"]
time = res["time"]
# Get TF from Input 1 to Output 1
K[0][0],T[0][0],L[0][0]=alg.Integral_Identification(y,u,time)
# Get TF from Input 1 to Output 2
K[1][0],T[1][0],L[1][0]=alg.Integral_Identification(y2,u,time)
# Setup second experiment Input 2 -> Output 1 and Output 2
# Reset the model state
sim.resetModelState()
# Input Parameter
sim.set({"fmu.u[1]": 0,"fmu.u[2]": 1})
# Simulation of the experiment
res = sim.simulate(dt, t_sim)
# Get the needed signals
y = res["fmu.y[1]"]
y2 = res["fmu.y[2]"]
u = res["fmu.u[2]"]
time = res["time"]
# Get TF from Input 2 to Output 1
K[0][1],T[0][1],L[0][1] = alg.Integral_Identification(y,u,time)
# Get TF from Input 2 to Output 2
K[1][1],T[1][1],L[1][1] = alg.Integral_Identification(y2,u,time)
# Print the System Parameter
# print(K,T,L)
###########################################################
####################### CONTROLLER DESIGN #################
###########################################################
# Loop over the three methods
for methods in range(0,3):
if methods == 0:
KY,B,D = alg.Control_Decentral(K,T,L)
elif methods == 1:
KY,B,D = alg.Control_Astrom(K,T,L,H)
else:
KY,B,D = alg.Control_Decoupled(K,T,L,H)
###########################################################
####################### EVALUATION ########################
###########################################################
# Create a frequency range
omega = np.logspace(wmin, wmax, dw)
# Store the singular values
sv = np.zeros((2,omega.shape[0]))
# Loop over the frequency
for freq in range(0, omega.shape[0]):
# Evaluate the sensitivity at given frequency
S = alg.compute_sensitivity(ss, KY,B,D, omega[freq])
u, sv[:, freq], w = np.linalg.svd(np.abs(S))
# Clear variables
del u,w
# Find the maximum of the sensitivity
ms = np.max(sv)
# Get the corresponding frequency
omega_ms = omega[np.argmax(sv)]
# Print the sensitivity
#p.loglog(omega, sv[0,:])
#p.loglog(omega, sv[1,:])
#p.show()
# Compute the gradient of the maximal singular values
# Compute the maximum singular value along all frequency
sv_max = np.max(sv, axis=0)
# Compute the slope via linear regression
slope, intercept, r_value, p_value, std_err = stats.linregress(omega[np.where(omega<=1.0)], sv_max[np.where(omega<=1.0)])
# Clear variables
del intercept, r_value, p_value, std_err
# Evaluate at the special frequencies
ms_s = []
for freq in w_special:
# Evaluate the sensitivity at given frequency
S = alg.compute_sensitivity(ss, KY,B,D, freq)
u, v, w = np.linalg.svd(np.abs(S))
ms_s.append(np.max(v))
# Clear variables
del u,v,w
###########################################################
####################### STORE DATA ########################
###########################################################
# Store Degree
R.set_value(samples, 'Degree', degree)
if methods == 0:
# Store the maximum sensitivity
R.set_value(samples, 'MS_RGA', ms)
# Store the corresponding frequency
R.set_value(samples, 'w_MS_RGA', omega_ms)
# Store the maximum singular value at the special frequencies
for freq in range(0, w_special.shape[0]):
R.set_value(samples, 'w_'+str(w_special[freq])+'_RGA', ms_s[freq])
# Store the gradient
R.set_value(samples, 'Grad_RGA', slope)
elif methods == 1:
# Store the maximum sensitivity
R.set_value(samples, 'MS_A', ms)
# Store the corresponding frequency
R.set_value(samples, 'w_MS_A', omega_ms)
# Store the maximum singular value at the special frequencies
for freq in range(0, w_special.shape[0]):
R.set_value(samples, 'w_'+str(w_special[freq])+'_A', ms_s[freq])
# Store the gradient
R.set_value(samples, 'Grad_A', slope)
else:
# Store the maximum sensitivity
R.set_value(samples, 'MS_D', ms)
# Store the corresponding frequency
R.set_value(samples, 'w_MS_D', omega_ms)
# Store the maximum singular value at the special frequencies
for freq in range(0, w_special.shape[0]):
R.set_value(samples, 'w_'+str(w_special[freq])+'_D', ms_s[freq])
# Store the gradient
R.set_value(samples, 'Grad_D', slope)
# Store after every sample
R.to_csv(filename, sep=";")
| gpl-3.0 |
mavlyutovrus/books_ocr | 1_adjust_rotation.py | 1 | 2795 | import numpy
from scipy.misc import imread
from matplotlib import pyplot as plt
from PIL import Image
from PIL import ImageDraw
def rotate(image, angle, color, filter=Image.NEAREST):
if image.mode == "P" or filter == Image.NEAREST:
matte = Image.new("1", image.size, 1) # mask
else:
matte = Image.new("L", image.size, 255) # true matte
bg = Image.new(image.mode, image.size, color)
bg.paste(image.rotate(angle, filter),
matte.rotate(angle, filter))
return bg
def build_profile(img, borders, axis):
profile = numpy.zeros(borders[axis][1] - borders[axis][0])
counter_axis_length = borders[1 - axis][1] - borders[1 - axis][0]
if axis:
for x in xrange(borders[axis][0], borders[axis][1]):
profile[x - borders[axis][0]] = sum(img[borders[1 - axis][0] : borders[1 - axis][1], x]) / float(counter_axis_length)
else:
for x in xrange(borders[axis][0], borders[axis][1]):
profile[x - borders[axis][0]] = sum(img[x, borders[1 - axis][0] : borders[1 - axis][1]]) / float(counter_axis_length)
return profile
def get_matrix(img):
original_image_mat = img.load()
mat = numpy.zeros((img.size[1], img.size[0]))
for x in xrange(mat.shape[0]):
for y in xrange(mat.shape[1]):
if original_image_mat[y, x] == 0 or original_image_mat[y, x] == (0, 0, 0):
mat[x, y] = 1
return mat
def get_std_with_rotation(original_image, rotation):
img = rotate(original_image, rotation, "white")
mat = get_matrix(img)
profile = build_profile(mat, ((0, mat.shape[0]), (0, mat.shape[1])), 0)
stdev = numpy.std(profile)
return stdev
def adjust_rotation(original_image):
best_angle = 0
max_stdev = 0
prev_val = 0
for rotation in xrange(-4, 5):
rotation /= 4.0
img = rotate(original_image, rotation, "white")
mat = get_matrix(img)
profile = build_profile(mat, ((0, mat.shape[0]), (0, mat.shape[1])), 0)
stdev = numpy.std(profile)
prev_val = stdev
if stdev > max_stdev:
best_angle = rotation
max_stdev = stdev
return best_angle
import os
img_path = "016774/"
out_path = "016774_rot/"
processed = 0
files = [fname for fname in os.listdir(img_path) if ".png" in fname]
for fname in files:
if os.path.isfile(out_path + fname):
print "..", fname, "existed"
processed += 1
continue
original_image = Image.open(img_path + fname)
angle = adjust_rotation(original_image)
print "..", fname, angle
rotated_image = rotate(original_image, angle, "white")
rotated_image.save(out_path + fname)
processed += 1
if processed % 10 == 0:
print "..processed", processed, "/", len(files)
| apache-2.0 |
chandinijain/Auquan-Toolbox | pythonToolbox/dataloader.py | 1 | 6651 | from __future__ import absolute_import, division, print_function, unicode_literals
try:
from urllib import urlretrieve, urlopen
except ImportError:
from urllib.request import urlretrieve, urlopen
import numpy as np
import pandas as pd
from pandas.tseries.offsets import BDay
import os
def download(exchange, ticker, file_name,logger):
url = 'https://raw.githubusercontent.com/Auquan/auquan-historical-data/master/%s/historicalData/%s.csv'%(exchange.lower(), ticker.lower())
status = urlopen(url).getcode()
if status == 200:
logger.info('Downloading %s data to file: %s'%(ticker, file_name))
urlretrieve(url, file_name)
return True
else:
logger.info('File not found. Please check settings!')
return False
def data_available(exchange, markets,logger):
dir_name = '%s/historicalData/'%exchange.lower()
if not os.path.exists(dir_name):
os.makedirs(dir_name)
for m in markets:
file_name = '%s%s.csv'%(dir_name, m.lower())
if not os.path.exists(file_name):
try:
assert(download(exchange, m, file_name,logger)),"%s not found. Please check settings!"%file_name
except AssertionError:
logger.exception("%s not found. Please check settings!"%file_name)
raise
return True
def download_security_list(exchange, logger):
dir_name = '%s/'%exchange.lower()
if not os.path.exists(dir_name):
os.makedirs(dir_name)
file_name = '%s%s.txt'%(dir_name, exchange.lower())
if not os.path.exists(file_name):
url = 'https://raw.githubusercontent.com/Auquan/auquan-historical-data/master/%s'%(file_name)
status = urlopen(url).getcode()
if status == 200:
logger.info('Downloading data to file: %s'%file_name)
urlretrieve(url, file_name)
return True
else:
logger.info('File not found. Please check exchange settings!')
return False
else:
return True
def compatibleDictKeyCheck(dict, key):
try:
return dict.has_key(key)
except:
return key in dict
def load_data(exchange, markets, start, end, lookback, budget, logger, random=False):
logger.info("Loading Data from %s to %s...."%(start,end))
# because there are some holidays, add some cushion to the lookback
try:
dates = [pd.to_datetime(start)-BDay(lookback* 1.10), pd.to_datetime(end)]
except ValueError:
logger.exception("%s or %s is not valid date. Please check settings!"%(start, end))
raise ValueError("%s or %s is not valid date. Please check settings!"%(start, end))
try:
assert(dates[1]>dates[0]),"Start Date is after End Date"
except AssertionError:
logger.exception("Start Date is after End Date")
raise
#Download list of securities
assert(download_security_list(exchange, logger))
if len(markets)==0:
file_name = '%s/%s.txt'%(exchange.lower(), exchange.lower())
markets = [line.strip() for line in open(file_name)]
markets = [m.upper() for m in markets]
features = ['OPEN', 'CLOSE', 'HIGH', 'LOW', 'VOLUME']
date_range = pd.date_range(start=dates[0], end=dates[1], freq='B')
back_data = {}
if random:
for feature in features:
back_data[feature] = pd.DataFrame(np.random.randint(10, 50, size=(date_range.size,len(markets))),
index=date_range,
columns=markets)
else:
for feature in features:
back_data[feature] = pd.DataFrame(index=date_range, columns=markets)
assert data_available(exchange, markets, logger)
market_to_drop = []
for market in markets:
logger.info('Reading %s.csv'%market)
csv = pd.read_csv('%s/historicalData/%s.csv'%(exchange.lower(), market.lower()), index_col=0)
csv.index = pd.to_datetime(csv.index)
csv.columns = [col.upper() for col in csv.columns]
csv = csv.reindex(index=csv.index[::-1])
features = [col.upper() for col in csv.columns]
market_first_date = csv.index[0]
if (market_first_date > (dates[0]-BDay(1)+BDay(1))):
market_to_drop.append(market)
logger.info('Dropping %s. This stock did not start trading before (start date -lookback days)'%market)
continue
market_last_date = csv.index[-1]
if (market_last_date < (dates[0] - BDay(1) + BDay(1))):
market_to_drop.append(market)
logger.info('Dropping %s. This stock terminated before (start date -lookback days)'%market)
continue
back_fill_data = False
if market_last_date in date_range:
back_fill_data = True
logger.info('The market %s does not have data for the whole duration. Substituting missing dates with the last known data'%market)
for feature in features:
back_data[feature][market] = csv[feature][date_range]
if back_fill_data:
back_data[feature].loc[market_last_date:date_range[-1], market] = back_data[feature].at[market_last_date, market]
for m in market_to_drop:
logger.info('Dropping %s. Not Enough Data'%m)
markets.remove(m)
for feature in features:
back_data[feature].drop(market_to_drop, axis=1, inplace=True)
dates_to_drop = pd.Series(False, index=date_range)
for feature in features:
dates_to_drop |= pd.isnull(back_data[feature]).any(axis=1)
dropped_dates = date_range[dates_to_drop]
date_range = date_range[~dates_to_drop]
print(dropped_dates)
for feature in features:
back_data[feature] = back_data[feature].drop(dropped_dates)
back_data['COST TO TRADE'] = pd.DataFrame(0, index=date_range, columns=markets)
back_data['POSITION'] = pd.DataFrame(0, index=date_range, columns=markets)
back_data['ORDER'] = pd.DataFrame(0, index=date_range, columns=markets)
back_data['FILLED_ORDER'] = pd.DataFrame(0, index=date_range, columns=markets)
back_data['DAILY_PNL'] = pd.DataFrame(0, index=date_range, columns=markets)
back_data['TOTAL_PNL'] = pd.DataFrame(0, index=date_range, columns=markets)
back_data['FUNDS'] = pd.Series(budget, index=date_range)
back_data['VALUE'] = pd.Series(budget, index=date_range)
back_data['MARGIN'] = pd.Series(0, index=date_range)
return back_data, date_range
| mit |
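A hedged example of calling load_data above; the exchange name and tickers are assumptions, and the first call needs network access so the CSVs can be fetched from the auquan-historical-data repository:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("dataloader-demo")

# Hypothetical exchange/tickers; lookback is in business days, budget in currency units
back_data, date_range = load_data(
    exchange="nasdaq100", markets=["AAPL", "MSFT"],
    start="2016-01-04", end="2016-06-30",
    lookback=90, budget=100000, logger=logger)
print(back_data["CLOSE"].tail())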
nmayorov/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 8 | 25509 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
jamessergeant/pylearn2 | pylearn2/scripts/datasets/step_through_small_norb.py | 49 | 3123 | #! /usr/bin/env python
"""
A script for sequentially stepping through SmallNORB, viewing each image and
its label.
Intended as a demonstration of how to iterate through NORB images,
and as a way of testing SmallNORB's StereoViewConverter.
If you just want an image viewer, consider
pylearn2/scripts/show_binocular_grayscale_images.py,
which is not specific to SmallNORB.
"""
__author__ = "Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __author__
__license__ = "3-clause BSD"
__maintainer__ = __author__
__email__ = "mkg alum mit edu (@..)"
import argparse, pickle, sys
from matplotlib import pyplot
from pylearn2.datasets.norb import SmallNORB
from pylearn2.utils import safe_zip
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Step-through visualizer for SmallNORB dataset")
parser.add_argument("--which_set",
default='train',
required=True,
help=("'train', 'test', or the path to a "
"SmallNORB .pkl file"))
return parser.parse_args()
def load_norb(args):
if args.which_set in ('test', 'train'):
return SmallNORB(args.which_set, True)
else:
norb_file = open(args.which_set)
return pickle.load(norb_file)
args = parse_args()
norb = load_norb(args)
topo_space = norb.view_converter.topo_space # does not include label space
vec_space = norb.get_data_specs()[0].components[0]
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.suptitle("Press space to step through, or 'q' to quit.")
def draw_and_increment(iterator):
"""
Draws the image pair currently pointed at by the iterator,
then increments the iterator.
"""
def draw(batch_pair):
for axis, image_batch in safe_zip(axes, batch_pair):
assert image_batch.shape[0] == 1
grayscale_image = image_batch[0, :, :, 0]
axis.imshow(grayscale_image, cmap='gray')
figure.canvas.draw()
def get_values_and_increment(iterator):
try:
vec_stereo_pair, labels = norb_iter.next()
except StopIteration:
return (None, None)
topo_stereo_pair = vec_space.np_format_as(vec_stereo_pair,
topo_space)
return topo_stereo_pair, labels
batch_pair, labels = get_values_and_increment(norb_iter)
draw(batch_pair)
norb_iter = norb.iterator(mode='sequential',
batch_size=1,
data_specs=norb.get_data_specs())
def on_key_press(event):
if event.key == ' ':
draw_and_increment(norb_iter)
if event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
draw_and_increment(norb_iter)
pyplot.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
pkruskal/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
theilmbh/klusta-pipeline | klusta_pipeline/utils.py | 1 | 7720 | import os
import glob
import itertools
import numpy as np
import h5py as h5
from scipy import interpolate
from random import sample
from klusta_pipeline import MAX_CHANS
import datetime as dt
from sklearn.linear_model import LinearRegression
def validate_merge(import_list,omit):
mat_data = []
chans = ['Port_%i'%(p+1) for p in range(MAX_CHANS)]
chans = [ch for ch in chans if ch not in omit]
for s2mat in import_list:
mat_chans=[]
interval = None
with h5.File(s2mat, 'r') as f:
for ch in chans:
try:
chan_data = f[ch]
mat_chans.append(ch)
assert interval is None or interval == chan_data['interval'][0], "intervals don't match between channels in %s ... something seems wrong" % (s2mat)
interval = chan_data['interval'][0]
except KeyError:
continue
mat_data.append(
{
'chans': mat_chans,
'name': s2mat,
'interval': interval
}
)
assert len(mat_data)>0, 'No mat files found'
# ref = mat_data[0]
# for chk in mat_data[1:]:
# # check if all files have same number of chans
# assert len(ref['chans'])==len(chk['chans'])
# # check if all files have same chans
# for ch in ref['chans']:
# assert ch in chk['chans']
# # check if all files have same sampling rate
# #assert ref[ch]['interval']==chk[ch]['interval']
return mat_data
def get_pen(penstr):
'''extracts penetration data from the penetration folder'''
pen = penstr.split('_')
d = dict(
index = int(pen[0][3:]),
hemisphere = {'Rgt':'right','Lft':'left'}[pen[1]],
anterior = int(pen[2][2:]),
lateral = int(pen[3][2:]),
)
return d
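# For illustration: assuming penetration folders are named like
# 'Pen01_Lft_AP2500_ML1200' (the 'Pen'/'AP'/'ML' prefixes are an assumption;
# only the '_' split and the character offsets matter to the code),
# get_pen('Pen01_Lft_AP2500_ML1200') would return
# index=1, hemisphere='left', anterior=2500, lateral=1200.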
def get_site(sitestr):
'''extracts site data from the site folder'''
site = sitestr.split('_')
d = dict(
index = int(site[0][4:]),
depth = int(site[1][1:]),
)
return d
def get_epoch(epcstr):
'''extracts epoch data from the epoch folder'''
epc = epcstr.split('_')
d = dict(
index = int(epc[0][3:]),
datetime = dt.datetime.strptime(epc[1],'%Y-%m-%d+%H-%M-%S').ctime(),
prot = '_'.join(epc[2:]),
)
return d
def get_file_info(filename):
''' extracts experimental metadata from a filename '''
d = dict(filename=filename)
if filename.startswith('AutoSv'):
d.update(
datetime = dt.datetime.strptime(filename[7:22],'%m%d%y_%H-%M-%S').ctime()
)
elif filename.startswith('Sub'):
print filename
datestr = filename.split('_')[1]
d.update(
datetime = dt.datetime.strptime(datestr,'%m-%d-%y+%H-%M-%S').ctime(),
)
else:
pass
return d
def get_info(smrx):
''' takes the full path to an smrx file and returns experimental metadata'''
path = smrx.split('\\')
subj, _,pen, site, epc, filename = path[-6:]
d = dict(
subject = subj,
pen = get_pen(pen),
site = get_site(site),
epoch = get_epoch(epc),
file = get_file_info(filename),
)
return d
def get_import_list(export,info):
import_list = []
for item in info:
import_list.append(
os.path.join(
export,
item['file']['filename'].split('.')[0]+'.mat',
)
)
return import_list
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
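# Minimal usage sketch (hypothetical values):
#     >>> list(pairwise([1, 2, 3, 4]))
#     [(1, 2), (2, 3), (3, 4)]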
def chunkit(t,v):
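    '''split (t,v) into contiguous chunks.
    The smallest difference between consecutive timestamps is taken as the
    nominal sampling interval; a new chunk starts wherever the gap exceeds
    1.5x that interval. Yields (t,v) unchanged if there are no gaps.
    '''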
dt = np.diff(t)
interval = min(dt)
breaks = np.nonzero(dt>1.5*interval)[0]
if len(breaks)>0:
yield t[0:breaks[0]],v[0:breaks[0]]
for start,stop in pairwise(breaks):
yield t[start+1:stop],v[start+1:stop]
yield t[breaks[-1]+1:],v[breaks[-1]+1:]
else:
yield t,v
def do_car(data):
'''common average reference.
for each channel, subtract off the mean of the other channels
'''
car_data = np.empty(data.shape,data.dtype)
for ch,waveform in enumerate(data.T):
common_av = np.vstack((data.T[:ch,:],data.T[ch+1:,:])).mean(axis=0)
car_data[:,ch] = waveform-common_av
return car_data
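# Minimal sanity check for do_car (hypothetical 2-sample, 3-channel array;
# rows are samples, columns are channels):
#     >>> import numpy as np
#     >>> data = np.array([[1., 2., 3.],
#     ...                  [4., 5., 6.]])
#     >>> do_car(data)[:, 0]   # channel 0 minus the mean of channels 1 and 2
#     array([-1.5, -1.5])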
def spline_realign(r,chans,fs,start,stop):
'''spline realignment.
realigns each channel using InterpolatedUnivariateSpline
r: dictionary containing raw data keyed by channel label
chans: channel labels
fs: sampling frequency (Hz)
'''
t_new = np.arange(start,stop,1.0/fs)
realigned_data = np.empty((len(t_new), len(chans) ), np.int16)
for ch,lbl in enumerate(chans):
spline = interpolate.InterpolatedUnivariateSpline(r[lbl]['times'], r[lbl]['values'])
realigned_data[:,ch] = spline(t_new)
return realigned_data
def no_realign(r,chans,fs,start,stop):
'''no realignment.
truncates data so that all channels are the same length.
assumes that each sample of each channel occurs at simultaneous absolute time
r: dictionary containing raw data keyed by channel label
chans: channel labels
fs: sampling frequency (Hz)
'''
raw_length = np.amin([r[lbl]['length'] for lbl in chans])
realigned_data = np.empty((raw_length, len(chans)), np.int16)
for ch,lbl in enumerate(chans):
realigned_data[:,ch] = r[lbl]['values'][0:raw_length]
return realigned_data
realign_methods = {
'none':no_realign,
'spline':spline_realign,
}
def realign(r,chans,fs,method):
'''Realignment wrapper.
calls appropriate realignment method.
r: dictionary containing raw data keyed by channel label
chans: channel labels
fs: sampling frequency (Hz)
method: string containing the desired realignment method
'''
start = np.amax([r[lbl]['start'] for lbl in chans])
stop = np.amin([r[lbl]['stop'] for lbl in chans])
rec = {
'name': '',
'description': '',
'file_origin': r['file_origin'],
'start_time': start,
'fs': fs,
}
rec['data'] = realign_methods[method](r, chans, fs, start, stop)
return rec
def subsample_data(data,npts=1000000,axis=0):
pts = data.shape[0]
indx = sample(xrange(pts), npts) if pts > npts else range(pts)
return data[indx,:]
def subsample_index(data_lengths, sample_pts=1000000):
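    '''randomly allocate ~sample_pts sample indices across recordings.
    Each recording receives a share proportional to its length (capped at
    that length); returns one array of sampled indices per recording.
    '''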
data_pts = np.sum(data_lengths)
counts = np.zeros(len(data_lengths))
for i in np.random.choice(xrange(len(data_lengths)), size=sample_pts, p=np.array(data_lengths)/float(data_pts)):
counts[i] += 1
for i in xrange(len(data_lengths)):
counts[i] = min(counts[i], data_lengths[i])
return [np.random.choice(xrange(length), size=count, replace=False) for length, count in zip(data_lengths, counts)]
def calc_weights(rec_list):
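    '''fit the per-channel weights used by do_war.
    For each channel, a linear regression predicts that channel from all of
    the other channels on a subsample of the concatenated recordings; the
    fitted coefficients are returned as a list of weight vectors.
    '''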
linreg = LinearRegression()
idxs = subsample_index([len(r['data']) for r in rec_list])
data = np.vstack(tuple(r['data'][idx] for r, idx in zip(rec_list, idxs)))
coeffs = []
for ch,waveform in enumerate(data.T):
X = np.vstack((data.T[:ch,:],data.T[ch+1:,:]))
linreg.fit(X.T,waveform)
coeffs.append(linreg.coef_)
return coeffs
def do_war(data,weights):
'''common average reference.
for each channel, subtract off the weighted average of the other channels
'''
car_data = np.empty(data.shape,data.dtype)
for ch,(waveform,w) in enumerate(zip(data.T,weights)):
X = np.vstack((data.T[:ch,:],data.T[ch+1:,:]))
car_data[:,ch] = waveform - X.T.dot(w)
return car_data
| bsd-3-clause |
ephes/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. In contrast, when imposing connectivity constraints, the
clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
ypkang/Dato-Core | src/unity/python/graphlab/test/test_io.py | 13 | 15881 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import commands
import json
import logging
import os
import re
import tempfile
import unittest
import pandas
import graphlab
import graphlab.connect.main as glconnect
import graphlab.sys_util as _sys_util
from graphlab.test.util import create_server, start_test_tcp_server
from pandas.util.testing import assert_frame_equal
def _test_save_load_object_helper(testcase, obj, url):
"""
Helper function to test save and load a server side object to a given url.
"""
def cleanup(url):
"""
Remove the saved file from temp directory.
"""
protocol = None
path = None
splits = url.split("://")
if len(splits) > 1:
protocol = splits[0]
path = splits[1]
else:
path = url
        if not protocol or protocol == "local" or protocol == "remote":
tempdir = tempfile.gettempdir()
pattern = path + ".*"
for f in os.listdir(tempdir):
if re.search(pattern, f):
os.remove(os.path.join(tempdir, f))
if isinstance(obj, graphlab.SGraph):
obj.save(url + ".graph")
newobj = graphlab.load_graph(url + ".graph")
testcase.assertItemsEqual(obj.get_fields(), newobj.get_fields())
testcase.assertDictEqual(obj.summary(), newobj.summary())
elif isinstance(obj, graphlab.Model):
obj.save(url + ".model")
newobj = graphlab.load_model(url + ".model")
testcase.assertItemsEqual(obj.list_fields(), newobj.list_fields())
testcase.assertEqual(type(obj), type(newobj))
elif isinstance(obj, graphlab.SFrame):
obj.save(url + ".frame_idx")
newobj = graphlab.load_sframe(url + ".frame_idx")
testcase.assertEqual(obj.shape, newobj.shape)
testcase.assertEqual(obj.column_names(), newobj.column_names())
testcase.assertEqual(obj.column_types(), newobj.column_types())
assert_frame_equal(obj.head(obj.num_rows()).to_dataframe(),
newobj.head(newobj.num_rows()).to_dataframe())
else:
raise TypeError
cleanup(url)
def create_test_objects():
vertices = pandas.DataFrame({'vid': ['1', '2', '3'],
'color': ['g', 'r', 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
edges = pandas.DataFrame({'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., 0.1, 1.]})
graph = graphlab.SGraph().add_vertices(vertices, 'vid').add_edges(edges, 'src_id', 'dst_id')
sframe = graphlab.SFrame(edges)
model = graphlab.pagerank.create(graph)
return (graph, sframe, model)
class LocalFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
if os.path.exists(url):
os.remove(url)
def test_object_save_load(self):
for prefix in ['', 'local://', 'remote://']:
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_basic(self):
self._test_read_write_helper(self.tempfile, 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv", 'hello,world,woof')
self._test_read_write_helper("remote://" + self.tempfile + ".csv", 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper(self.tempfile + ".gz", 'hello world')
self._test_read_write_helper(self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello world')
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("/root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("/root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("/root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("/root/tmp.model"))
class RemoteFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
glconnect.stop()
auth_token = 'graphlab_awesome'
self.server = start_test_tcp_server(auth_token=auth_token)
glconnect.launch(self.server.get_server_addr(), auth_token=auth_token)
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
@classmethod
def tearDownClass(self):
glconnect.stop()
self.server.stop()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
def test_basic(self):
self._test_read_write_helper("remote://" + self.tempfile, 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello,world,woof')
def test_object_save_load(self):
prefix = "remote://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_exception(self):
self.assertRaises(ValueError, lambda: self._test_read_write_helper(self.tempfile, 'hello world'))
self.assertRaises(ValueError, lambda: self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello,world,woof'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("remote:///root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("remote:///root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("remote:///root/tmp.model"))
class HttpConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def _test_read_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
def test_read(self):
expected = "\n".join([str(unichr(i + ord('a'))) for i in range(26)])
expected = expected + "\n"
self._test_read_helper(self.url, expected)
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(self.url, '.....'))
@unittest.skip("Disabling HDFS Connector Tests")
class HDFSConnectorTests(unittest.TestCase):
    # This test requires hadoop to be installed and available in $PATH.
# If not, the tests will be skipped.
@classmethod
def setUpClass(self):
self.has_hdfs = len(_sys_util.get_hadoop_class_path()) > 0
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content_expected)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
# clean up the file we wrote
status, output = commands.getstatusoutput('hadoop fs -test -e ' + url)
        if status == 0:
commands.getstatusoutput('hadoop fs -rm ' + url)
def test_basic(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile, 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
def test_gzip(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile + ".gz", 'hello,world,woof')
self._test_read_write_helper("hdfs://" + self.tempfile + ".csv.gz", 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
def test_object_save_load(self):
if self.has_hdfs:
prefix = "hdfs://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
def test_exception(self):
bad_url = "hdfs:///root/"
if self.has_hdfs:
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs://" + self.tempfile))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(bad_url + "/tmp", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(bad_url + "x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(bad_url + "x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(bad_url + "mygraph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(bad_url + "x.model"))
else:
            logging.getLogger(__name__).info("No hdfs available. Test pass.")
@unittest.skip("Disabling S3 Connector Tests")
class S3ConnectorTests(unittest.TestCase):
# This test requires aws cli to be installed. If not, the tests will be skipped.
@classmethod
def setUpClass(self):
status, output = commands.getstatusoutput('aws s3api list-buckets')
        self.has_s3 = (status == 0)
self.standard_bucket = None
self.regional_bucket = None
        # Use aws cli s3api to find a bucket with "gl-testdata" in the name, and use it as our test bucket.
        # Temp files will be read from / written to the test bucket's /tmp folder and be cleared on exit.
if self.has_s3:
try:
json_output = json.loads(output)
bucket_list = [b['Name'] for b in json_output['Buckets']]
assert 'gl-testdata' in bucket_list
assert 'gl-testdata-oregon' in bucket_list
self.standard_bucket = 'gl-testdata'
self.regional_bucket = 'gl-testdata-oregon'
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
except:
                logging.getLogger(__name__).warning("Failed to parse the output of s3api into JSON. Please check your awscli version.")
self.has_s3 = False
def _test_read_write_helper(self, url, content_expected):
s3url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(s3url, content_expected)
content_read = glconnect.get_unity().__read__(s3url)
self.assertEquals(content_read, content_expected)
(status, output) = commands.getstatusoutput('aws s3 rm --region us-west-2 ' + url)
        if status != 0:
logging.getLogger(__name__).warning("Cannot remove file: " + url)
def test_basic(self):
if self.has_s3:
for bucket in [self.standard_bucket, self.regional_bucket]:
self._test_read_write_helper("s3://" + bucket + self.tempfile, 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
def test_gzip(self):
if self.has_s3:
self._test_read_write_helper("s3://" + self.standard_bucket + self.tempfile + ".gz", 'hello,world,woof')
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
def test_object_save_load(self):
if self.has_s3:
prefix = "s3://" + self.standard_bucket
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
def test_exception(self):
if self.has_s3:
bad_bucket = "i_am_a_bad_bucket"
prefix = "s3://" + bad_bucket
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + self.standard_bucket + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + "/somerandomfile", "somerandomcontent"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + self.standard_bucket + "I'amABadUrl/", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(prefix + "/x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(prefix + "/x.model"))
else:
            logging.getLogger(__name__).info("No s3 bucket available. Test pass.")
| agpl-3.0 |
DonBeo/statsmodels | statsmodels/tsa/x13.py | 7 | 23281 | """
Run x12/x13-arima specs in a subprocess from Python and curry results back
into python.
Notes
-----
Many of the functions are called x12. However, they are also intended to work
for x13. If this is not the case, it's a bug.
"""
from __future__ import print_function
import os
import subprocess
import tempfile
import re
from warnings import warn
import pandas as pd
from statsmodels.compat.python import iteritems
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (X13NotFoundError,
IOWarning, X13Error,
X13Warning)
__all__ = ["x13_arima_select_order", "x13_arima_analysis"]
_binary_names = ('x13as.exe', 'x13as', 'x12a.exe', 'x12a')
class _freq_to_period:
def __getitem__(self, key):
if key.startswith('M'):
return 12
elif key.startswith('Q'):
return 4
_freq_to_period = _freq_to_period()
_period_to_freq = {12 : 'M', 4 : 'Q'}
_log_to_x12 = {True : 'log', False : 'none', None : 'auto'}
_bool_to_yes_no = lambda x : 'yes' if x else 'no'
def _find_x12(x12path=None, prefer_x13=True):
"""
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
"""
global _binary_names
if x12path is not None and x12path.endswith(_binary_names):
# remove binary from path if given
x12path = os.path.dirname(x12path)
if not prefer_x13: # search for x12 first
_binary_names = _binary_names[::-1]
if x12path is None:
x12path = os.getenv("X12PATH", "")
if not x12path:
x12path = os.getenv("X13PATH", "")
elif x12path is None:
x12path = os.getenv("X13PATH", "")
if not x12path:
x12path = os.getenv("X12PATH", "")
for binary in _binary_names:
x12 = os.path.join(x12path, binary)
try:
subprocess.check_call(x12, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return x12
except OSError:
pass
else:
return False
def _check_x12(x12path=None):
x12path = _find_x12(x12path)
if not x12path:
raise X13NotFoundError("x12a and x13as not found on path. Give the "
"path, put them on PATH, or set the "
"X12PATH or X13PATH environmental variable.")
return x12path
def _clean_order(order):
"""
Takes something like (1 1 0)(0 1 1) and returns a arma order, sarma
order tuple. Also accepts (1 1 0) and return arma order and (0, 0, 0)
"""
    order = re.findall(r"\([0-9 ]*?\)", order)
clean = lambda x : tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder
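# For illustration, the behaviour on the example string from the docstring
# (hedged example, not actual x13 output):
#     >>> _clean_order("(1 1 0)(0 1 1)")
#     ((1, 1, 0), (0, 1, 1))
#     >>> _clean_order("(2 1 2)")
#     ((2, 1, 2), (0, 0, 0))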
def run_spec(x12path, specpath, outname=None, meta=False, datameta=False):
if meta and datameta:
raise ValueError("Cannot specify both meta and datameta.")
if meta:
args = [x12path, "-m " + specpath]
elif datameta:
args = [x12path, "-d " + specpath]
else:
args = [x12path, specpath]
if outname:
args += [outname]
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def _make_automdl_options(maxorder, maxdiff, diff):
options = "\n"
options += "maxorder = ({0} {1})\n".format(maxorder[0], maxorder[1])
if maxdiff is not None: # maxdiff always takes precedence
options += "maxdiff = ({0} {1})\n".format(maxdiff[0], maxdiff[1])
else:
options += "diff = ({0} {1})\n".format(diff[0], diff[1])
return options
def _make_var_names(exog):
if hasattr(exog, "name"):
var_names = exog.name
elif hasattr(exog, "columns"):
var_names = exog.columns
else:
raise ValueError("exog is not a Series or DataFrame or is unnamed.")
try:
var_names = " ".join(var_names)
except TypeError: # cannot have names that are numbers, pandas default
from statsmodels.base.data import _make_exog_names
if exog.ndim == 1:
var_names = "x1"
else:
var_names = " ".join(_make_exog_names(exog))
return var_names
def _make_regression_options(trading, exog):
if not trading and exog is None: # start regression spec
return ""
reg_spec = "regression{\n"
if trading:
reg_spec += " variables = (td)\n"
if exog is not None:
var_names = _make_var_names(exog)
reg_spec += " user = ({0})\n".format(var_names)
reg_spec += " data = ({0})\n".format("\n".join(map(str,
exog.values.ravel().tolist())))
reg_spec += "}\n" # close out regression spec
return reg_spec
def _make_forecast_options(forecast_years):
if forecast_years is None:
return ""
forecast_spec = "forecast{\n"
forecast_spec += "maxlead = ({0})\n}}\n".format(forecast_years)
return forecast_spec
def _check_errors(errors):
errors = errors[errors.find("spc:")+4:].strip()
if errors and 'ERROR' in errors:
raise X13Error(errors)
elif errors and 'WARNING' in errors:
warn(errors, X13Warning)
def _convert_out_to_series(x, dates, name):
"""
    Convert x to a pandas Series, where x is a string in the format given by
x-13arima-seats output.
"""
from StringIO import StringIO
from pandas import read_table
out = read_table(StringIO(x), skiprows=2, header=None)
return out.set_index(dates).rename(columns={1 : name})[name]
def _open_and_read(fname):
# opens a file, reads it, and make sure it's closed
with open(fname, 'r') as fin:
fout = fin.read()
return fout
class Spec(object):
@property
def spec_name(self):
return self.__class__.__name__.replace("Spec", "")
def create_spec(self, **kwargs):
spec = """{name} {{
{options}
}}
"""
return spec.format(name=self.spec_name,
options=self.options)
def set_options(self, **kwargs):
options = ""
        for key, value in iteritems(kwargs):
options += "{0}={1}\n".format(key, value)
self.__dict__.update({key : value})
self.options = options
class SeriesSpec(Spec):
"""
Parameters
----------
data
appendbcst : bool
appendfcst : bool
comptype
compwt
decimals
modelspan
name
period
precision
to_print
to_save
span
start
title
type
Notes
-----
Rarely used arguments
divpower
missingcode
missingval
saveprecision
trimzero
"""
def __init__(self, data, name='Unnamed Series', appendbcst=False,
appendfcst=False,
comptype=None, compwt=1, decimals=0, modelspan=(),
period=12, precision=0, to_print=[], to_save=[], span=(),
start=(1, 1), title='', series_type=None, divpower=None,
missingcode=-99999, missingval=1000000000):
appendbcst, appendfcst = map(_bool_to_yes_no, [appendbcst,
appendfcst,
])
series_name = "\"{0}\"".format(name[:64]) # trim to 64 characters
title = "\"{0}\"".format(title[:79]) # trim to 79 characters
self.set_options(data=data, appendbcst=appendbcst,
appendfcst=appendfcst, period=period, start=start,
title=title, name=series_name,
)
def pandas_to_series_spec(x):
# from statsmodels.tools.data import _check_period_index
# check_period_index(x)
if hasattr(x, 'columns'): # convert to series
if len(x.columns) > 1:
raise ValueError("Does not handle DataFrame with more than one "
"column")
x = x[x.columns[0]]
data = "({0})".format("\n".join(map(str, x.values.tolist())))
# get periodicity
# get start / first data
# give it a title
try:
period = _freq_to_period[x.index.freqstr]
except (AttributeError, ValueError):
from pandas.tseries.api import infer_freq
period = _freq_to_period[infer_freq(x.index)]
start_date = x.index[0]
if period == 12:
year, stperiod = start_date.year, start_date.month
elif period == 4:
year, stperiod = start_date.year, start_date.quarter
else: # pragma: no cover
raise ValueError("Only monthly and quarterly periods are supported."
" Please report or send a pull request if you want "
"this extended.")
if hasattr(x, 'name'):
name = x.name or "Unnamed Series"
else:
name = 'Unnamed Series'
series_spec = SeriesSpec(data=data, name=name, period=period,
title=name, start="{0}.{1}".format(year,
stperiod))
return series_spec
def x13_arima_analysis(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None, retspec=False,
speconly=False, start=None, freq=None,
print_stdout=False, x12path=None, prefer_x13=True):
"""
Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array-like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
        order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array-like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
        index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
res : Bunch
A bunch object with the following attributes:
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``
- trend : pandas.Series
The trend-cycle component of ``endog``
- irregular : pandas.Series
The final irregular component of ``endog``
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output
back in.
"""
x12path = _check_x12(x12path)
if not isinstance(endog, (pd.DataFrame, pd.Series)):
if start is None or freq is None:
raise ValueError("start and freq cannot be none if endog is not "
"a pandas object")
endog = pd.Series(endog, index=pd.DatetimeIndex(start=start,
periods=len(endog),
freq=freq))
spec_obj = pandas_to_series_spec(endog)
spec = spec_obj.create_spec()
spec += "transform{{function={0}}}\n".format(_log_to_x12[log])
if outlier:
spec += "outlier{}\n"
options = _make_automdl_options(maxorder, maxdiff, diff)
spec += "automdl{{{0}}}\n".format(options)
spec += _make_regression_options(trading, exog)
spec += _make_forecast_options(forecast_years)
spec += "x11{ save=(d11 d12 d13) }"
if speconly:
return spec
# write it to a tempfile
# TODO: make this more robust - give the user some control?
ftempin = tempfile.NamedTemporaryFile(delete=False, suffix='.spc')
ftempout = tempfile.NamedTemporaryFile(delete=False)
try:
ftempin.write(spec)
ftempin.close()
ftempout.close()
# call x12 arima
p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
p.wait()
stdout = p.stdout.read()
if print_stdout:
            print(stdout)
# check for errors
errors = _open_and_read(ftempout.name + '.err')
_check_errors(errors)
# read in results
results = _open_and_read(ftempout.name + '.out')
seasadj = _open_and_read(ftempout.name + '.d11')
trend = _open_and_read(ftempout.name + '.d12')
irregular = _open_and_read(ftempout.name + '.d13')
finally:
try: # sometimes this gives a permission denied error?
# not sure why. no process should have these open
os.remove(ftempin.name)
os.remove(ftempout.name)
except:
if os.path.exists(ftempin.name):
warn("Failed to delete resource {0}".format(ftempin.name),
IOWarning)
if os.path.exists(ftempout.name):
warn("Failed to delete resource {0}".format(ftempout.name),
IOWarning)
seasadj = _convert_out_to_series(seasadj, endog.index, 'seasadj')
trend = _convert_out_to_series(trend, endog.index, 'trend')
irregular = _convert_out_to_series(irregular, endog.index, 'irregular')
# NOTE: there isn't likely anything in stdout that's not in results
# so may be safe to just suppress and remove it
if not retspec:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout)
else:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout,
spec=spec)
return res
def x13_arima_select_order(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None,
start=None, freq=None, print_stdout=False,
x12path=None, prefer_x13=True):
"""
    Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array-like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
        order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array-like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
        index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
results : Bunch
A bunch object that has the following attributes:
- order : tuple
The regular order
- sorder : tuple
The seasonal order
- include_mean : bool
Whether to include a mean or not
- results : str
The full results from the X12/X13 analysis
- stdout : str
The captured stdout from the X12/X13 analysis
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in.
"""
results = x13_arima_analysis(endog, x12path=x12path, exog=exog, log=log,
outlier=outlier, trading=trading,
forecast_years=forecast_years,
maxorder=maxorder, maxdiff=maxdiff, diff=diff,
start=start, freq=freq, prefer_x13=prefer_x13)
model = re.search("(?<=Final automatic model choice : ).*",
results.results)
order = model.group()
if re.search("Mean is not significant", results.results):
include_mean = False
elif re.search("Constant", results.results):
include_mean = True
else:
include_mean = False
order, sorder = _clean_order(order)
res = Bunch(order=order, sorder=sorder, include_mean=include_mean,
results=results.results, stdout=results.stdout)
return res
class X13ArimaAnalysisResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.seasadj.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Seas. Adjusted')
self.trend.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Trend')
self.irregular.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Irregular')
fig.tight_layout()
return fig
if __name__ == "__main__":
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
np.random.seed(123)
ar = [1, .35, .8]
ma = [1, .8]
arma = ArmaProcess(ar, ma, nobs=100)
assert arma.isstationary()
assert arma.isinvertible()
y = arma.generate_sample()
dates = pd.date_range("1/1/1990", periods=len(y), freq='M')
ts = pd.TimeSeries(y, index=dates)
xpath = "/home/skipper/src/x12arima/x12a"
try:
        results = x13_arima_analysis(ts, x12path=xpath)
except:
print("Caught exception")
    results = x13_arima_analysis(ts, x12path=xpath, log=False)
# import pandas as pd
# seas_y = pd.read_csv("usmelec.csv")
# seas_y = pd.TimeSeries(seas_y["usmelec"].values,
# index=pd.DatetimeIndex(seas_y["date"], freq="MS"))
# results = x13_arima_analysis(xpath, seas_y)
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
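# (the boundary satisfies w[0]*x + w[1]*y + intercept = 0; solving for y gives
# the slope a = -w[0]/w[1] and the offset -intercept/w[1] used above)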
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
ratnania/pigasus | tests/test_poisson_circle_metric.py | 1 | 4129 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
from pigasus.utils.manager import context
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from pigasus.gallery.poisson import *
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
sys.stdout = open(filename.split('.py')[0]+'.txt', 'w')
# ...
sin = np.sin ; cos = np.cos
# ...
# ...
# exact solution
# ...
u = lambda x,y : [sin ( 1.0 - x**2 - y**2 ) ]
# ...
# ...
# rhs
# ...
f = lambda x,y : [4.0 * ( x**2 + y**2 ) * sin ( 1.0 - x**2 - y**2 ) + 4.0 * cos ( 1.0 - x**2 - y**2 ) ]
# ...
#-----------------------------------
#-----------------------------------
# ...
#-----------------------------------
AllDirichlet = True
try:
nx = int(sys.argv[1])
except:
nx = 3
try:
ny = int(sys.argv[2])
except:
ny = 3
try:
px = int(sys.argv[3])
except:
px = 2
try:
py = int(sys.argv[4])
except:
py = 2
from caid.cad_geometry import circle as domain
from caid.cad_geometry import cad_geometry, cad_nurbs
from pigasus.fem.metric import metric
from caid.cad_geometry import square as patch
from caid.cad_geometry import square
TYPE = None
#TYPE = "mapping"
#TYPE = "points"
#TYPE = "analytic"
if TYPE is None:
geo = domain(n=[nx,ny],p=[px,py])
# -----------------
if TYPE is not None:
# geo = cad_geometry("domain.xml")
# geo = patch(n=[nx,ny],p=[px,py])
geo_s = square(p=[2,2])
nrb = geo_s[0]
U,V = nrb.knots
#C = nrb.points
C = np.zeros_like(nrb.points)
s = 1./np.sqrt(2)
weights = np.ones((3,3))
weights[1,0] = s
weights[0,1] = s
weights[2,1] = s
weights[1,2] = s
srf = cad_nurbs([U,V], C, weights=weights)
geo = cad_geometry()
geo.append(srf)
geo._internal_faces = geo_s._internal_faces
geo._external_faces = geo_s._external_faces
geo._connectivity = geo_s._connectivity
#
srf = geo[0]
dim = srf.dim
n = [nx,ny]
list_t = []
for axis in range(0,dim):
ub = srf.knots[axis][0]
ue = srf.knots[axis][-1]
list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1])
p =[px,py]
list_p = []
for axis in range(0,dim):
list_p.append(p[axis] - srf.degree[axis])
geo.refine(list_t=list_t, list_p=list_p)
# -----------------
if TYPE is None:
Metric = None
geo = domain(n=[nx,ny],p=[px,py])
if TYPE == "mapping":
geo_m = domain()
Metric = metric(geometry=geo_m, with_igakit=False)
if TYPE == "analytic":
    F = lambda r,t : [r * cos(2. * np.pi * t), r * sin(2. * np.pi * t)]
    DF = lambda r, t : [cos(2. * np.pi * t) \
            , - 2. * np.pi * r * sin(2. * np.pi * t) \
            , sin(2. * np.pi * t) \
            , 2. * np.pi * r * cos(2. * np.pi * t)]
Metric = metric(analytic=[F,DF])
if TYPE == "points":
lpi_shape = np.asarray([int(x) for x in np.genfromtxt('shape.txt')])
# lpr_points = np.genfromtxt('points_adv.txt').reshape(lpi_shape)
lpr_points = np.genfromtxt('pts_adv.txt').reshape(lpi_shape)
Metric = metric(points = lpr_points)
# ...
# ...
try:
bc_dirichlet
except NameError:
bc_dirichlet = None
else:
pass
try:
bc_neumann
except NameError:
bc_neumann = None
else:
pass
try:
AllDirichlet
except NameError:
AllDirichlet = None
else:
pass
try:
Dirichlet
except NameError:
Dirichlet = None
else:
pass
try:
Metric
except NameError:
Metric = None
else:
pass
# ...
with context():
# ...
PDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,
AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)
# ...
# ...
PDE.assembly(f=f)
PDE.solve()
# ...
# ...
normU = PDE.norm(exact=u)
print("norm U = ", normU)
# ...
# ...
if PLOT:
PDE.plot() ; plt.colorbar(); plt.title('$u_h$')
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| mit |
donbright/piliko | experiment/bernoulli/pythberntest.py | 1 | 2265 | from fractions import Fraction as F
import sys
# rational paramterization / approximation of bernoulli's lemniscate
# in a 3 dimensional 'dumbbell' arrangement.
# (note - this uses terms from Norman Wildberger's rational
# trigonometry/chromogeometry. briefly for a vector from 0,0 to x,y:
#
# blue quadrance (x,y) = x^2 + y^2
# red quadrance (x,y) = x^2 - y^2
# green quadrance (x,y) = 2*x*y
# )
# theory:
#
# step one is the rational paramterization of bernoulli's lemniscate
# we found this in pythbern.py
#
# step two is to 'grow' it into three d as some kind of dumbbell shape.
#
# how..? hrm.
#
# consider each 'x' as a 'distance' from origin for generating a circle.
# consider 'y' as the radius of the circle.
# now, draw the circle--- using rational points
# we will end up with a 'stack' of circles in the dumbbell shape
# as though we had sliced the dumbbell.
# imagine
def sqr(x): return x*x
def greenq_pts(x,y,x2,y2): return 2*(x2-x)*(y2-y)
def redq_pts(x,y,x2,y2): return sqr(x2-x)-sqr(y2-y)
def blueq_pts(x,y,x2,y2): return sqr(x2-x)+sqr(y2-y)
def greenq(m,n): return greenq_pts(0,0,m,n)
def redq(m,n): return redq_pts(0,0,m,n)
def blueq(m,n): return blueq_pts(0,0,m,n)
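# quick worked check (vector from the origin to (3,4)):
# blueq(3,4) = 9+16 = 25, redq(3,4) = 9-16 = -7, greenq(3,4) = 2*3*4 = 24,
# and 25^2 = (-7)^2 + 24^2, i.e. blue^2 = red^2 + green^2, which is what lets
# (redq/blueq, greenq/blueq) below land on the unit circle.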
xs,ys,zs=[],[],[]
depth = 10
for m in range(-depth,depth):
for n in range(0,depth):
if redq(m,n)==0: continue
if greenq(m,n)==0: continue
bq = blueq(m,n)
rq = redq(m,n)
gq = greenq(m,n)
for m2 in range(-depth,depth):
for n2 in range(-depth,depth):
if blueq(m2,n2)==0: continue
xdumb = F(bq,rq) * F( 1, blueq( F(bq,rq), F(gq, rq) ) )
y = F(gq,rq) * F( 1, blueq( F(bq,rq), F(gq, rq) ) )
radius = y
ydumb = F(redq(m2,n2),blueq(m2,n2))
zdumb = F(greenq(m2,n2),blueq(m2,n2))
ydumb *= radius
zdumb *= radius
xs += [xdumb]
ys += [ydumb]
zs += [zdumb]
max=max(xs+ys+zs)
for i in range(0,2):
print str(xs[i])+','+str(ys[i])+','+str(zs[i]),
print '....'
for i in range(0,len(xs)):
xs[i] = F( xs[i], max )
ys[i] = F( ys[i], max )
zs[i] = F( zs[i], max )
print len(xs), 'points'
import numpy as np
import matplotlib.pylab as plt
fig,ax = plt.subplots(figsize=(8,8))
ax.set_ylim([-1.2,1.2])
ax.set_xlim([-1.2,1.2])
for i in range(0,len(xs)):
xs[i]=xs[i]+zs[i]/4
ys[i]=ys[i]+zs[i]/4
ax.scatter(xs,ys)
plt.show()
| bsd-3-clause |
zangsir/sms-tools | lectures/08-Sound-transformations/plots-code/stftFiltering-orchestra.py | 3 | 1669 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band-pass filter using a hanning window (bins outside the pass
# band are attenuated by 60 dB)
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
filt = np.zeros(N/2+1)-60
filt[startBin:startBin+nBins] = bandpass
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, w, N, H)
mY,pY = STFT.stftAnal(y, w, N, H)
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
plt.plot(fs*np.arange(mX[0,:].size)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mY[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| agpl-3.0 |
r-mart/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
    R : None, 'svd' or numpy.random.RandomState, optional
        initialization: 'svd' uses the NNDSVD initialization, otherwise
        random nonnegative factors are drawn from R (numpy's global
        RandomState when R is None)
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
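# Minimal usage sketch (hypothetical data; R=None falls back to numpy's
# global RandomState, R='svd' uses the NNDSVD initialization above):
#     >>> import numpy as np
#     >>> V = np.abs(np.random.randn(20, 10))
#     >>> W, H = alt_nnmf(V, r=3, R=None)
#     >>> W.shape, H.shape
#     ((20, 3), (3, 10))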
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
cbrunet/fibermodes | scripts/cutoff.py | 2 | 8962 | # This file is part of FiberModes.
#
# FiberModes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FiberModes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FiberModes. If not, see <http://www.gnu.org/licenses/>.
"""This file contains the code to generate plots used in the JLT paper
about cutoff of three-layer step-index fibers.
"""
from fibermodes import FiberFactory, Simulator, Mode, ModeFamily
from itertools import zip_longest
import numpy
from matplotlib import pyplot
from matplotlib.patches import Rectangle
import seaborn as sns
from math import sqrt
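# Each FIBERS entry is (label, layer radii in metres, layer refractive indices);
# the final index has no matching radius and is therefore used as the cladding.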
FIBERS = [
("Fiber (a)", [4e-6, 6e-6], [1.47, 1.43, 1.44]),
("Fiber (b)", [4e-6, 6e-6], [1.47, 1.45, 1.44]),
("Fiber (c)", [4e-6, 6e-6], [1.43, 1.47, 1.44]),
("Fiber (d)", [4e-6, 6e-6], [1.45, 1.47, 1.44]),
("Fiber (e)", [4e-6, 6e-6], [1.44, 1.47, 1.44]),
]
VLIM = (2.5, 7.0)
MODE_COLORS = {
"HE(1,1)": sns.xkcd_rgb['mid blue'],
"LP(0,1)": sns.xkcd_rgb['mid blue'],
"TE(0,1)": sns.xkcd_rgb['orange'],
"HE(2,1)": sns.xkcd_rgb['bright sky blue'],
"LP(1,1)": sns.xkcd_rgb['bright sky blue'],
"TM(0,1)": sns.xkcd_rgb['red'],
"EH(1,1)": sns.xkcd_rgb['darkish green'],
"HE(3,1)": sns.xkcd_rgb['purplish blue'],
"LP(2,1)": sns.xkcd_rgb['purplish blue'],
"EH(2,1)": sns.xkcd_rgb['bluish green'],
"HE(4,1)": sns.xkcd_rgb['fuchsia'],
"LP(3,1)": sns.xkcd_rgb['fuchsia'],
"EH(3,1)": sns.xkcd_rgb['leafy green'],
"HE(5,1)": sns.xkcd_rgb['neon pink'],
"LP(4,1)": sns.xkcd_rgb['neon pink'],
"EH(4,1)": sns.xkcd_rgb['bright olive'],
"HE(6,1)": sns.xkcd_rgb['rosy pink'],
"LP(5,1)": sns.xkcd_rgb['rosy pink'],
"EH(5,1)": sns.xkcd_rgb['darkish green'],
"HE(7,1)": sns.xkcd_rgb['purplish blue'],
"LP(6,1)": sns.xkcd_rgb['purplish blue'],
"EH(6,1)": sns.xkcd_rgb['bluish green'],
"HE(8,1)": sns.xkcd_rgb['fuchsia'],
"LP(7,1)": sns.xkcd_rgb['fuchsia'],
"EH(7,1)": sns.xkcd_rgb['leafy green'],
"HE(9,1)": sns.xkcd_rgb['neon pink'],
"LP(8,1)": sns.xkcd_rgb['neon pink'],
"EH(8,1)": sns.xkcd_rgb['bright olive'],
"HE(10,1)": sns.xkcd_rgb['rosy pink'],
"LP(9,1)": sns.xkcd_rgb['rosy pink'],
"HE(1,2)": sns.xkcd_rgb['deep sky blue'],
"LP(0,2)": sns.xkcd_rgb['deep sky blue'],
"TE(0,2)": sns.xkcd_rgb['browny orange'],
"HE(2,2)": sns.xkcd_rgb['true blue'],
"LP(1,2)": sns.xkcd_rgb['true blue'],
"TM(0,2)": sns.xkcd_rgb['blood red'],
"EH(1,2)": sns.xkcd_rgb['evergreen'],
"HE(3,2)": sns.xkcd_rgb['bright violet'],
"LP(2,2)": sns.xkcd_rgb['bright violet'],
"LP(0,3)": sns.xkcd_rgb['turquoise blue'],
}
FIRSTMODES = (
Mode(ModeFamily.TE, 0, 1),
Mode(ModeFamily.HE, 2, 1),
Mode(ModeFamily.TM, 0, 1),
Mode(ModeFamily.EH, 1, 1),
Mode(ModeFamily.HE, 3, 1),
Mode(ModeFamily.HE, 1, 2),
)
def plot_b_vs_V(vectorial=True, scalar=False):
nf = len(FIBERS)
fig, axes = pyplot.subplots(nf, 1, sharex=False, sharey=False,
subplot_kw={'xlim': VLIM, 'ylim': (0, 0.3)},
figsize=(6, 9))
sns.despine(fig)
lines = {}
for i, (name, r, n) in enumerate(FIBERS):
axes[i].set_title(name)
f = FiberFactory()
for (r_, n_) in zip_longest(r, n):
f.addLayer(radius=r_, index=n_)
fiber = f[0]
V = numpy.linspace(*VLIM)
wl = [fiber.toWl(v) for v in V[::-1]]
sim = Simulator(f, wl, vectorial=vectorial, scalar=scalar, delta=1e-5)
co = next(sim.cutoff())
b = next(sim.b())
assert len(b) == len(wl)
for mode, cutoff in co[0].items():
if cutoff == 0:
continue # skip HE(1,1) / LP(0,1)
color = MODE_COLORS[str(mode)]
axes[i].axvline(cutoff, ls=':', color=color)
b_ = numpy.empty(len(wl))
for j, b__ in enumerate(b):
b_[j] = b__.get(mode, float("nan"))
lines[mode], = axes[i].plot(V[::-1], b_, color=color)
if i == 1 and vectorial is True: # fiber b
r = Rectangle((4.6, 0), 0.8, 0.04, alpha=.3, facecolor='grey')
axes[i].add_patch(r)
handles = [lines[k] for k in sorted(lines)]
labels = [str(k) for k in sorted(lines)]
leg = fig.legend(handles, labels, loc='upper left',
bbox_to_anchor=(0.18, 1), frameon=True)
frame = leg.get_frame()
frame.set_linewidth(0)
fig.text(0.04, 0.5, "Normalized propagation constant ($b$)",
rotation='vertical', ha='center', va='center')
axes[-1].set_xlabel("Normalized frequency ($V_0$)")
fig.tight_layout(rect=(0.04, 0, 1, 1))
def plot_zoom(fiber, vlim=(4.6, 5.4), blim=(0, 0.04)):
fig = pyplot.figure(figsize=(6, 5))
ax = fig.add_subplot(111, xlim=vlim, ylim=blim)
sns.despine(fig)
name, r, n = fiber
ax.set_title(name)
f = FiberFactory()
for (r_, n_) in zip_longest(r, n):
f.addLayer(radius=r_, index=n_)
fiber = f[0]
V = numpy.linspace(*vlim)
wl = [fiber.toWl(v) for v in V[::-1]]
sim = Simulator(f, wl, delta=1e-7)
co = next(sim.cutoff())
b = next(sim.b())
for mode, cutoff in co[0].items():
if cutoff == 0:
continue # skip HE(1,1) / LP(0,1)
color = MODE_COLORS[str(mode)]
ax.axvline(cutoff, ls=':', color=color)
b_ = numpy.empty(len(wl))
for j, b__ in enumerate(b):
b_[j] = b__.get(mode, float("nan"))
ax.plot(V[::-1], b_, color=color,
label=str(mode) if mode.nu in (1, 3) else None)
ax.set_ylabel("Normalized propagation constant ($b$)")
ax.set_xlabel("Normalized frequency ($V_0$)")
ax.legend(loc='best')
fig.tight_layout()
def plot_var(n1, n2, n3, vlim, modes=None, mmax=None, numax=None, colors=None):
f = FiberFactory()
f.addLayer(radius=4e-6, index=n1)
f.addLayer(radius=6e-6, index=n2)
f.addLayer(index=n3)
wl = 800e-9
if modes is not None:
numax = max(m.nu for m in modes)
mmax = max(m.m for m in modes)
sim = Simulator(f, wl, delta=1e-5, numax=numax, mmax=mmax)
co = list(sim.cutoff())
if modes is None:
modes = set()
for m_ in sim.modes():
modes |= m_[0]
fig = pyplot.figure(figsize=(6, 5))
ax = fig.add_subplot(111, xlim=vlim, ylim=(1.2, 1.8))
sns.despine(fig)
if hasattr(n1, '__iter__'):
yl = 'Index of center layer ($n_1$)'
n = n1
on = n2
var = 1
else:
yl = 'Index of middle layer ($n_2$)'
n = n2
on = n1
var = 2
na = sqrt(on**2 - n3*n3)
lines = {}
for mode in modes:
co_ = numpy.empty(len(n))
for i, co__ in enumerate(co):
co_[i] = co__[0].get(mode, float("nan"))
nm = max(n[i], on)
if n[i] == n3 and var == 2:
co_[i] *= 6 / 4
else:
co_[i] *= na / sqrt(nm*nm - n3*n3)
if colors:
color = colors[mode.m][mode.nu]
else:
color = MODE_COLORS[str(mode)]
lines[mode], = ax.plot(co_, n, color=color, label=str(mode))
ax.axhline(1.4, ls='--', color='k')
ax.axhline(1.6, ls='--', color='k')
ax.axhspan(1.2, 1.4, color='grey', alpha=0.6)
ax.axhspan(1.4, 1.6, color='grey', alpha=0.4)
ax.axhspan(1.6, 1.8, color='grey', alpha=0.2)
ax.set_ylabel(yl)
ax.set_xlabel("Normalized frequency ($V_0$)")
if colors:
m = [Mode("TE", 0, 1), Mode("HE", 1, 2), Mode("HE", 1, 3)]
handles = [lines[m_] for m_ in m]
labels = ["$m=1$", "$m=2$", "$m=3$"]
ax.legend(handles, labels, loc='best')
else:
ax.legend(loc='best')
fig.tight_layout()
if __name__ == '__main__':
sns.set_style("ticks")
# plot_b_vs_V() # veccutoff.pdf
# plot_b_vs_V(vectorial=False, scalar=True) # lpcutoff.pdf
# plot_zoom(FIBERS[1]) # fiberbzoom.pdf
COLORS = [[],
sns.color_palette("Blues_r"),
sns.color_palette("Reds_r"),
sns.color_palette("Greens_r")]
# plot_var(numpy.linspace(1.2, 1.8, 31), 1.6, 1.4,
# (1, 8), FIRSTMODES) # centervar
plot_var(numpy.linspace(1.2, 1.8), 1.6, 1.4,
(0, 25), mmax=3, numax=5, colors=COLORS)
# plot_var(1.6, numpy.linspace(1.2, 1.8, 31), 1.4,
# (1, 8), FIRSTMODES) # ringvar
pyplot.show()
| gpl-3.0 |
spallavolu/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
fatadama/estimation | filters/python/problems/benchmark/benchmark_ekf.py | 1 | 5122 | import sys
import numpy as np
import scipy.integrate as sp
import matplotlib.pyplot as plt
sys.path.append('../../ekf')
import ekf
# system constants
sigma_y1 = 0.01;
sigma_y2 = 0.01;
## vicon2xhatInitial(yk) - returns initial state and covariance estimate based on a single initial vicon measurement
# @param yk [x,y,z,b0,b1,b2,b3] vicon measurement of position and attitude (quaternion)
def vicon2xhatInitial(yk):
# xhat: [(x,y,z)_inertial (v1,v2,v3)_body, quat_body]
xhat = np.zeros(10)
# initialize position to vicon
xhat[0:3] = yk[0:3].copy()
# initialize attitude to vicon
xhat[6:10] = yk[3:7].copy()
# velocity is already zero
var_x = 0.001*0.001#meters^2
var_vel = 0.1*0.1
var_quat = 0.01*0.01
# covariance estimate
Pk = np.diag([var_x, var_x, var_x, var_vel, var_vel, var_vel, var_quat, var_quat, var_quat, var_quat])
Pk = Pk + 1e-12*np.ones((10,10))
return (xhat,Pk)
## stateDerivative(x,t,u) - returns the time derivative of the true (simulated) state
#
# @param x state given by (position, velocity) of the benchmark oscillator
# @param t current time
# @param u unused placeholder, kept for interface compatibility with odeint
def stateDerivative(x,t,u):
f = np.zeros((2,))
f[0] = x[1]
f[1] = -2.0*(1.5/1.0)*(x[0]*x[0]-1.0)*x[1]-(1.2/1.0)*x[0]
return f
def stateDerivativeEKF(x,t,u):
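    # EKF process model: same structure as the truth model above, but with unit
    # damping and stiffness coefficients, so the filter model is deliberately
    # mismatched with the simulated dynamics.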
f = np.zeros((2,))
f[0] = x[1]
f[1] = -2.0*(1.0/1.0)*(x[0]*x[0]-1.0)*x[1]-(1.0/1.0)*x[0]
return f
## stateGradient(x,t,u) - returns the gradient of the derivative of the filter state w.r.t. the filter state
#
# @param x filter state given by (position, velocity)
# @param t current time
# @param u unused placeholder argument
def stateGradient(x,t,u):
Fk = np.zeros((2,2))
Fk = np.array([ [0.0,1.0],[-4.0*x[0]*x[1]-1.0,-2.0*(x[0]*x[0]-1.0)] ])
return Fk
## stateProcessInfluence(x,t,u) - returns the process noise matrix on the derivative of the filter state
#
# @param x filter state given by (position, velocity)
# @param t current time
# @param u unused placeholder argument
def stateProcessInfluence(x,t,u):
# numerically validated for one case
Gk = np.array([ [0.0],[1.0] ])
return Gk
## measFunction(x,t) - returns the expected (position-only) measurement, given the state and time
#
# @param x filter state given by (position, velocity)
# @param t current time
def measFunction(x,t):
yexp = np.array([ x[0]])
return yexp
## measGradient(x,t) - returns the gradient of the expected measurement w.r.t. the state
#
# @param x filter state given by (position, velocity)
# @param t current time
def measGradient(x,t):
Hk = np.array([ [1.0,0.0] ])
return Hk
def initFunction(yinit):
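    # Initial filter guess: position taken from the first measurement, zero
    # velocity, and a large (slightly regularized) initial covariance.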
return (np.array([ yinit[0],0.0 ]),np.identity(2)*1000.0+1e-6*np.ones((2,2)))
def simMeasurementFunction(xk,t):
ymeas = np.array([ xk[0]+ np.random.normal(0.0,sigma_y1) ])
return ymeas
def main(argin='./'):
# output file
FOUT = open('python_benchmark.csv','w')
    FOUT.write('t,x1,x2,ymeas1,x1hat,x2hat,P11,P22\n')
# initialize EKF
Qkin = np.array([[0.2]])
#Qkin = np.array([[20.0]])
EKF = ekf.ekf(2,1,stateDerivativeEKF,stateGradient,stateProcessInfluence,Qk = Qkin)
Rkin = np.array([ [sigma_y1*sigma_y1] ])
dt = 0.01
tfin = 10.0
nSteps = int(tfin/dt)
tsim = 0.0
xk = np.array([1.0,0.0])
yk = measFunction(xk,tsim)
EKF.init(yk,initFunction,tsim)
print(nSteps)
xkl = np.zeros((nSteps,2))
xtl = np.zeros((nSteps,2))
Pkl = np.zeros((nSteps,2))
tl = np.zeros((nSteps,))
for k in range(nSteps):
# propagte
EKF.propagateRK4(dt)
# simulate
y = sp.odeint(stateDerivative,xk,np.array([tsim,tsim+dt]),args=([],) )
xk = y[-1,:].copy()
# update time
tsim = tsim + dt
# measurement
ymeas = simMeasurementFunction(xk,tsim)
# update EKF
EKF.update(tsim,ymeas,measFunction,measGradient,Rkin)
# log to file
FOUT.write('%f,%f,%f,%f,%f,%f,%f,%f\n' % (tsim,xk[0],xk[1],ymeas[0],EKF.xhat[0],EKF.xhat[1],EKF.Pk[0,0],EKF.Pk[1,1]) )
# log to data
xkl[k,0] = EKF.xhat[0]
xkl[k,1] = EKF.xhat[1]
xtl[k,0] = xk[0]
xtl[k,1] = xk[1]
Pkl[k,0] = EKF.Pk[0,0]
Pkl[k,1] = EKF.Pk[1,1]
tl[k] = tsim
print("Completed sim")
fig = plt.figure()
ax = []
for k in range(2):
nam = 'e'+str(k+1)
ax.append(fig.add_subplot(2,1,k+1,ylabel=nam))
ax[k].plot(tl,xkl[:,k]-xtl[:,k])
ax[k].plot(tl,3*np.sqrt(Pkl[:,k]),'r--')
ax[k].plot(tl,-3*np.sqrt(Pkl[:,k]),'r--')
ax[k].grid()
fig.show()
raw_input("Return to continue")
FOUT.close()
return
if __name__ == '__main__':
main()
| gpl-2.0 |
tejaskhot/deep-learning | conv_cifar/scripts/conv_destin.py | 6 | 9175 | """
@author: Tejas Khot
@contact: [email protected]
"""
__author__='tejas'
import os
from time import time
import cPickle as pickle
from destin.load_data import *
from destin.network import *
import nnet.datasets as ds
from sklearn import svm
from scipy import io  # for io.savemat below (may already be re-exported by the wildcard imports above)
t_0 = time()
# *****Define Parameters for the Network and nodes
# Network Params
num_layers = 4
patch_mode = 'Adjacent'
image_type = 'Color'
network_mode = True
cifar_stat=[]
# For a Node: specify Your Algorithm Choice and Corresponding parameters
# ******************************************************************************************
#
# Incremental Clustering
#
num_nodes_per_layer = [[8, 8], [4, 4], [2, 2], [1, 1]]
num_cents_per_layer = [50, 25, 25 ,50]
pool_size = [(16,1),(2,2),(2,2),(1,1)] #pooling size: the first number is how many belief vectors
                                        #to pool together. For example, (64,1) pools all the
                                        #vectors in the first layer, (16,1) divides the first layer
                                        #into 4 quarters and pools each of them, and (4,1) divides
                                        #the first layer into 16 pieces and pools each of them.
print "Uniform DeSTIN with Clustering"
algorithm_choice = 'Clustering'
alg_params = {'mr': 0.01, 'vr': 0.01, 'sr': 0.001, 'DIMS': [],
'CENTS': [], 'node_id': [],
'num_cents_per_layer': num_cents_per_layer}
# ******************************************************************************************
#Load Data, 10 loads 5 batches in total 50,000
# 1 to 5 load batch_1 to batch_5training images, 1 to five
# Declare a Network Object and load Training Data
DESTIN = Network( num_layers, algorithm_choice, alg_params, num_nodes_per_layer, cifar_stat , patch_mode, image_type,)
#, , , , cifar_stat, patch_mode='Adjacent', image_type='Color'
DESTIN.setmode(network_mode)
DESTIN.set_lowest_layer(0)
# Load Data
# Modify the location of the training data in file "load_data.py"
# data = np.random.rand(5,32*32*3)
# Initialize Network; there is is also a layer-wise initialization option
DESTIN.init_network()
train_names=np.arange(0,476,25)
"""
#Train the Network
print "DeSTIN Training/with out Feature extraction"
for epoch in range(5):
counter=0
if epoch==0:
k=16
else:
k=0
for num in train_names[k:]:
data=load_train(num)
for I in range(data.shape[0]): # For Every image in the data set batch
if counter % 1000 == 0:
print("Training Iteration Image Number : %d" % counter)
for L in range(DESTIN.number_of_layers):
if L == 0:
img=data[I][:].reshape(50, 38, 38)
img=img.swapaxes(0,1).swapaxes(1,2) ## (38, 38, 50)
img=img[3:-3, 3:-3, :] ## (32, 32, 50)
# This is equivalent to sharing centroids or kernels
DESTIN.layers[0][L].load_input(img, [4, 4])
DESTIN.layers[0][L].do_layer_learning()
#DESTIN.layers[0][L].shared_learning()
else:
DESTIN.layers[0][L].load_input(DESTIN.layers[0][L - 1].nodes, [2, 2])
DESTIN.layers[0][L].do_layer_learning()
#DESTIN.layers[0][L].shared_learning()
if counter>0 and counter % 10000==0:
try:
pickle.dump( DESTIN, open( "DESTIN_conv_"+ str(epoch)+"_"+str(counter), "wb" ) )
print "Pickled DeSTIN till ", counter
except:
print "Could not pickle DeSTIN"
counter+=1
print "Epoch " + str(epoch+1) + " completed"
try:
pickle.dump( DESTIN, open( "DESTIN_conv", "wb" ) )
print "Pickled DeSTIN "
except:
print "Could not pickle DeSTIN"
print "done with destin training network"
"""
DESTIN=pickle.load( open( "DESTIN_conv", "rb" ) )
print("DeSTIN running | Feature Extraction over the Training Data")
network_mode = False
DESTIN.setmode(network_mode)
# Testing it over the training set
"""
if not os.path.exists('train'):
os.makedirs('train')
counter=29800
k=11
for num in train_names[k:]:
if num==train_names[11]:
data=load_train(num)[2300:]
else:
data=load_train(num)
for I in range(data.shape[0]): # For Every image in the data set
if counter % 1000 == 0:
print("Testing Iteration Number : Completed till Image: %d" % counter)
for L in range(DESTIN.number_of_layers):
if L == 0:
img=data[I][:].reshape(50, 38, 38)
img=img.swapaxes(0,1).swapaxes(1,2) ## (38, 38, 50)
img=img[3:-3, 3:-3, :] ## (32, 32, 50)
DESTIN.layers[0][L].load_input(img, [4, 4])
DESTIN.layers[0][L].do_layer_learning()
else:
DESTIN.layers[0][L].load_input(DESTIN.layers[0][L - 1].nodes, [2, 2])
DESTIN.layers[0][L].do_layer_learning()
DESTIN.update_belief_exporter(pool_size, True ,'average_exc_pad') #( maxpool_shape , ignore_border, mode)
if counter in range(199, 50999, 200):
Name = 'train/' + str(counter) + '.txt'
#file_id = open(Name, 'w')
np.savetxt(Name, np.array(DESTIN.network_belief['belief']))
#file_id.close()
# Get rid-off accumulated training beliefs
DESTIN.clean_belief_exporter()
counter+=1
print("Feature Extraction with the test set")
if not os.path.exists('test'):
os.makedirs('test')
test_names=np.arange(0,76,25)
counter=0
for num in test_names:
data=load_test(num)
for I in range(data.shape[0]): # For Every image in the data set
if counter % 1000 == 0:
print("Testing Iteration Number : Completed till Image: %d" % (counter))
for L in range(DESTIN.number_of_layers):
if L == 0:
img=data[I][:].reshape(50, 38, 38)
img=img.swapaxes(0,1).swapaxes(1,2) ## (38, 38, 50)
img=img[3:-3, 3:-3, :] ## (32, 32, 50)
DESTIN.layers[0][L].load_input(img, [4, 4])
DESTIN.layers[0][L].do_layer_learning() # Calculates belief for
else:
DESTIN.layers[0][L].load_input(DESTIN.layers[0][L - 1].nodes, [2, 2])
DESTIN.layers[0][L].do_layer_learning()
DESTIN.update_belief_exporter(pool_size, True ,'average_exc_pad')
if counter in range(199, 10199, 200):
Name = 'test/' + str(counter + 1) + '.txt'
np.savetxt(Name, np.array(DESTIN.network_belief['belief']))
# Get rid-off accumulated training beliefs
DESTIN.clean_belief_exporter()
counter+=1
del data
"""
print "Training With SVM"
print("Loading training and test labels")
trainData, trainLabel, testData, testLabel=ds.load_CIFAR10("/home/ubuntu/destin/cifar-10-batches-py")
del trainData
del testData
# Load Training and Test Data/Extracted from DeSTIN
# here we do not use the whole set of feature extracted from DeSTIN
# We use the features which are extracted from the top few layers
print("Loading training and testing features")
I = 199
Name = 'train/' + str(I) + '.txt'
trainData = np.ravel(np.loadtxt(Name))
for I in range(399, 50000, 200):
Name = 'train/' + str(I) + '.txt'
file_id = open(Name, 'r')
Temp = np.ravel(np.loadtxt(Name))
trainData = np.hstack((trainData, Temp))
del Temp
Len = np.shape(trainData)[0]
Size = np.size(trainData)
print "Training data shape is : ", trainData.shape
Width = Len/50000
print Len
print Width*50000
trainData = trainData.reshape((50000, Width))
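# Each belief file written above holds the pooled DeSTIN beliefs for a batch of
# 200 images, so stacking the files and reshaping yields one feature row per image.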
# Training SVM
SVM = svm.LinearSVC(C=1)
# alternative: svm.SVC(C=100, kernel='rbf')
print "Training the SVM"
trainLabel = np.squeeze(np.asarray(trainLabel).reshape(50000, 1))
#print trainData
SVM.fit(trainData, trainLabel)
print("Training Score = %f " % float(100 * SVM.score(trainData, trainLabel)))
#print("Training Accuracy = %f" % (SVM.score(trainData, trainLabel) * 100))
eff = {}
eff['train'] = SVM.score(trainData, trainLabel) * 100
del trainData
testData = np.array([])
print("Loading training and testing features")
I = 399
Name = 'test/' + str(I + 1) + '.txt'
testData = np.ravel(np.loadtxt(Name))
for I in range(599, 10000, 200):
Name = 'test/' + str(I + 1) + '.txt'
file_id = open(Name, 'r')
Temp = np.ravel(np.loadtxt(Name))
testData = np.hstack((testData, Temp))
del Temp
Len = np.shape(testData)[0]
Size = np.size(testData)
I = 399
Name = 'test/' + str(I + 1) + '.txt'
testData1 = np.ravel(np.loadtxt(Name))
print np.shape(testData1)[0]/200.0
Width = np.float(Len)/9800.0
print Len
print Size
testData = testData.reshape((9800, Width))
print "Predicting Test samples"
print("Test Score = %f" % float(100 * SVM.score(testData, testLabel[200:10000])))
#print("Training Accuracy = %f" % (SVM.score(testData, testLabel) * 100))
eff['test'] = SVM.score(testData, testLabel[200:10000]) * 100
io.savemat('accuracy.mat', eff)
print "Total time taken: ", time()-t_0
| gpl-2.0 |
cancan101/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 53 | 4430 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
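  # One-hot encode the integer class labels (depth 15) for the softmax loss below.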
target = tf.one_hot(target, 15, 1, 0)
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.Estimator(model_fn=cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
phoebe-project/phoebe2-docs | 2.1/tutorials/building_a_system.py | 1 | 7498 | #!/usr/bin/env python
# coding: utf-8
# Building a System
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# From now on, we'll just quickly do common setup at the beginning of each tutorial.
# For full gory details on the general concepts here, make sure to read [General Concepts](general_concepts).
#
# We'll always start by doing our basic imports, setting up a logger, and initializing
# an empty Bundle.
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.Bundle()
# Default Systems
# ------------------------
#
# Although the default empty Bundle doesn't include a system, there are available
# constructors that create default systems. To create a simple binary with component tags
# 'binary', 'primary', and 'secondary' (as above), you could call [default_binary](../api/phoebe.frontend.bundle.Bundle.default_binary.md):
# In[2]:
b = phoebe.Bundle.default_binary()
# or for short:
# In[3]:
b = phoebe.default_binary()
# In[4]:
print b.hierarchy
# To build the same binary but as a contact system, you would call:
# In[5]:
b = phoebe.default_binary(contact_binary=True)
# In[6]:
print b.hierarchy
# For more details on dealing with contact binary systems, see the [Contact Binary Example Script](../examples/minimal_contact_binary.ipynb)
# Adding Components Manually
# --------------------
#
# By default, an empty [Bundle](../api/phoebe.frontend.bundle.Bundle.md) does not contain any information about our system.
#
# So, let's first start by adding a few stars. Here we'll call the generic [add_component](../api/phoebe.frontend.bundle.Bundle.add_component.md) method. This method works for any type of component in the system - stars, orbits, planets, disks, rings, spots, etc. The first argument needs to be a callable or the name of a callable in [phoebe.parameters.component](../api/phoebe.parameters.component.md) which includes the following options:
#
# * orbit
# * star
# * envelope
#
# add_component also takes a keyword argument for the 'component' tag. Here we'll give them component tags 'primary' and 'secondary' - but note that these are merely convenience labels and do not hold any special roles. Some tags, however, are forbidden if they clash with other tags or reserved values - so if you get an error stating the component tag is forbidden, try using a different string.
# In[7]:
b = phoebe.Bundle()
# In[8]:
b.add_component(phoebe.component.star, component='primary')
b.add_component('star', component='secondary')
# But there are also shortcut methods for [add_star](../api/phoebe.frontend.bundle.Bundle.add_star.md) and [add_orbit](../api/phoebe.frontend.bundle.Bundle.add_orbit.md). In these cases you don't need to provide the function, but only the component tag of your star/orbit.
#
# Any of these functions also accept values for any of the qualifiers of the created parameters.
# In[9]:
b.add_star('extrastarforfun', teff=6000)
# Here we call the add_component method of the bundle with several arguments:
#
# * a function (or the name of a function) in phoebe.parameters.component. This
# function tells the bundle what parameters need to be added.
# * component: the tag that we want to give this component for future reference.
# * any additional keyword arguments: you can also provide initial values for Parameters
# that you know will be created. In the last example you can see that the
# effective temperature will already be set to 6000 (in default units which is K).
#
# and then we'll do the same to add an orbit:
# In[10]:
b.add_orbit('binary')
# Defining the Hierarchy
# ---------------------------------
#
# At this point all we've done is add a bunch of Parameters to our Bundle, but
# we still need to specify the hierarchical setup of our system.
#
# Here we want to place our two stars (with component tags 'primary' and 'secondary') in our
# orbit (with component tag 'binary'). This can be done with several different syntaxes sent to [b.set_hierarchy](../api/phoebe.frontend.bundle.Bundle.set_hierarchy.md):
# In[11]:
b.set_hierarchy(phoebe.hierarchy.binaryorbit, b['binary'], b['primary'], b['secondary'])
# or
# In[12]:
b.set_hierarchy(phoebe.hierarchy.binaryorbit(b['binary'], b['primary'], b['secondary']))
# If you access the value that this set via [get_hierarchy](../api/phoebe.frontend.bundle.Bundle.get_hierarchy.md), you'll see that it really just resulted
# in a simple string representation:
# In[13]:
b.get_hierarchy()
# We could just as easily have used this string to set the hierarchy:
# In[14]:
b.set_hierarchy('orbit:binary(star:primary, star:secondary)')
# If at any point we want to flip the primary and secondary components or make
# this binary a triple, it's as easy as changing this hierarchy, and
# everything else will adjust as needed (including cross-ParameterSet constraints
# and datasets).
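# For example, flipping which star plays the primary role would just be another
# call using the same string syntax shown above (a sketch only; it is left
# commented out here so the rest of this tutorial keeps the original ordering):
#
# b.set_hierarchy('orbit:binary(star:secondary, star:primary)')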
# The Hierarchy Parameter
# -----------------------------
#
# Setting the hierarchy just sets the value of a single parameter (although it may take some time because it also does a lot of paperwork and manages constraints between components in the system). You can access that parameter as usual:
# In[15]:
b['hierarchy@system']
# or through any of these shortcuts:
# In[16]:
b.get_hierarchy()
# In[17]:
b.hierarchy
# This [HierarchyParameter](../api/phoebe.parameters.HierarchyParameter.md) then has several methods unique to itself. You can, for instance, list the component tags of all the stars or orbits in the hierarchy via [get_stars](../api/phoebe.parameters.HierarchyParameter.get_stars.md) or [get_orbits](../api/phoebe.parameters.HierarchyParameter.get_orbits.md), respectively:
# In[18]:
print b.hierarchy.get_stars()
# In[19]:
print b.hierarchy.get_orbits()
# Or you can ask for the component tag of the top-level item in the hierarchy via [get_top](../api/phoebe.parameters.HierarchyParameter.get_top.md).
# In[20]:
print b.hierarchy.get_top()
# And request the parent, children, child, or sibling of any item in the hierarchy via [get_parent_of](../api/phoebe.parameters.HierarchyParameter.get_parent_of.md), [get_children_of](../api/phoebe.parameters.HierarchyParameter.get_children_of.md), or [get_sibling_of](../api/phoebe.parameters.HierarchyParameter.get_sibling_of.md).
# In[21]:
print b.hierarchy.get_parent_of('primary')
# In[22]:
print b.hierarchy.get_children_of('binary')
# In[23]:
print b.hierarchy.get_child_of('binary', 0) # here 0 means primary component, 1 means secondary
# In[24]:
print b.hierarchy.get_sibling_of('primary')
# We can also check whether a given component (by component tag) is the primary or secondary component in its parent orbit via [get_primary_or_secondary](../api/phoebe.parameters.HierarchyParameter.get_primary_or_secondary.md). Note that here it's just a coincidence (although on purpose) that the component tag is also 'secondary'.
# In[25]:
print b.hierarchy.get_primary_or_secondary('secondary')
# Next
# ----------
#
# Next up: let's learn about [saving and loading](saving_and_loading.ipynb).
# In[ ]:
| gpl-3.0 |
jorik041/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
JNero/Machine-Learning-in-action | DicisionTree/treePlotter.py | 1 | 3722 | import matplotlib.pyplot as plt
decisionNode = dict(boxstyle='sawtooth', fc='0.8')
leafNode = dict(boxstyle='round4', fc='0.8')
arrow_args = dict(arrowstyle="<-")
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction', xytext=centerPt, textcoords='axes fraction',
va='center', ha='center', bbox=nodeType, arrowprops=arrow_args)
def createPlot():
fig = plt.figure(1, facecolor='white')
fig.clf()
createPlot.ax1 = plt.subplot(111, frameon=False)
    plotNode('decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
    plotNode('leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
plt.show()
def getNumLeafs(myTree):
    numLeafs = 0
firstStr = list(myTree.keys())[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = list(myTree.keys())[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
thisDepth = 1+getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
{'no surfacing': {
0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(
xMid, yMid, txtString, va="center", ha="center", rotation=30)
# if the first key tells you what feat was split on
def plotTree(myTree, parentPt, nodeTxt):
numLeafs = getNumLeafs(myTree) # this determines the x width of this tree
depth = getTreeDepth(myTree)
# the text label for this node should be this
firstStr = list(myTree.keys())[0]
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) /
2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
# test to see if the nodes are dictonaires, if not they are leaf nodes
if type(secondDict[key]).__name__ == 'dict':
plotTree(secondDict[key], cntrPt, str(key)) # recursion
else: # it's a leaf node print the leaf node
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(
secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
# if you do get a dictonary you know it's a tree, and the first element
# will be another dict
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) # no ticks
# createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo puropses
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW
plotTree.yOff = 1.0
plotTree(inTree, (0.5, 1.0), '')
plt.show()
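# Illustrative usage sketch (not part of the original module): draw the first
# hard-coded example tree; assumes an interactive matplotlib backend.
if __name__ == '__main__':
    createPlot(retrieveTree(0))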
| apache-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/cluster/tests/test_mean_shift.py | 48 | 3653 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
HiSPARC/topaz | 140226_detector_overlap/detector_overlap.py | 1 | 2061 | """Determine area overlap/miss between a fixed square and rotating rectangle
For Groundparticle simulations
How much area overlap is there between a fixed square with surface .5 m^2
and a rectangle with sides 1m by 0.5m with same origin but rotated.
"""
import math
from random import uniform
import numpy as np
from matplotlib import pyplot as plt
from pointrect import Point, Rect
def run():
square_size = (math.sqrt(2) / 2.) / 2.
detector_long = 1. / 2.
detector_short = 0.5 / 2.
square = Rect(Point(-square_size, -square_size),
Point(square_size, square_size))
angles = np.linspace(0, np.pi / 2., 50)
overlap = []
n = 200000
for i, angle in enumerate(angles):
plt.figure()
xin = []
xout = []
yin = []
yout = []
count = 0
for _ in xrange(n):
long = uniform(-detector_long, detector_long)
short = uniform(-detector_short, detector_short)
point = Point(long, short).rotate(angle)
if square.contains(point):
xin.append(point.x)
yin.append(point.y)
count += 1
else:
xout.append(point.x)
yout.append(point.y)
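        # Monte Carlo estimate of the overlap area: the rectangle has area
        # 1.0 m * 0.5 m = 0.5 m^2, so overlap = 0.5 * (hits / n) = count / (2 * n).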
overlap.append(count / (2. * n))
plt.text(.4, .6, ('Angle: %.2f deg\nOverlap: %.3f m**2' %
(np.degrees(angle), overlap[-1])), ha='left')
plt.scatter(xin, yin, s=2, c='black')
plt.scatter(xout, yout, s=2, c='r')
plt.plot([-square_size, -square_size, square_size, square_size, -square_size],
[-square_size, square_size, square_size, -square_size, -square_size])
plt.xlabel('x (m)')
        plt.ylabel('y (m)')
plt.title('Overlap between straight square and rotated rectangle')
plt.axis('equal')
plt.xlim(-1, 1)
plt.ylim(-1, 1)
plt.savefig('plots/rotated_%d.png' % i)
plt.figure()
plt.plot(angles, overlap)
plt.show()
if __name__ == "__main__":
run()
| gpl-3.0 |
bthirion/nipy | examples/labs/histogram_fits.py | 4 | 3236 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Example of a script that perfoms histogram analysis of an activation
image, to estimate activation Z-score with various heuristics:
* Gamma-Gaussian model
* Gaussian mixture model
* Empirical normal null
This example is based on a (simplistic) simulated image.
Needs matplotlib
"""
# Author : Bertrand Thirion, Gael Varoquaux 2008-2009
print(__doc__)
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
import nipy.labs.utils.simul_multisubject_fmri_dataset as simul
import nipy.algorithms.statistics.empirical_pvalue as en
###############################################################################
# simulate the data
shape = (60, 60)
pos = 2 * np.array([[6, 7], [10, 10], [15, 10]])
ampli = np.array([3, 4, 4])
dataset = simul.surrogate_2d_dataset(n_subj=1, shape=shape, pos=pos,
ampli=ampli, width=10.0).squeeze()
fig = plt.figure(figsize=(12, 10))
plt.subplot(3, 3, 1)
plt.imshow(dataset, cmap=plt.cm.hot)
plt.colorbar()
plt.title('Raw data')
Beta = dataset.ravel().squeeze()
###############################################################################
# fit Beta's histogram with a Gamma-Gaussian mixture
gam_gaus_pp = en.gamma_gaussian_fit(Beta, Beta)
gam_gaus_pp = np.reshape(gam_gaus_pp, (shape[0], shape[1], 3))
plt.figure(fig.number)
plt.subplot(3, 3, 4)
plt.imshow(gam_gaus_pp[..., 0], cmap=plt.cm.hot)
plt.title('Gamma-Gaussian mixture,\n first component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 5)
plt.imshow(gam_gaus_pp[..., 1], cmap=plt.cm.hot)
plt.title('Gamma-Gaussian mixture,\n second component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 6)
plt.imshow(gam_gaus_pp[..., 2], cmap=plt.cm.hot)
plt.title('Gamma-Gaussian mixture,\n third component posterior proba.')
plt.colorbar()
###############################################################################
# fit Beta's histogram with a mixture of Gaussians
alpha = 0.01
gaus_mix_pp = en.three_classes_GMM_fit(Beta, None, alpha, prior_strength=100)
gaus_mix_pp = np.reshape(gaus_mix_pp, (shape[0], shape[1], 3))
plt.figure(fig.number)
plt.subplot(3, 3, 7)
plt.imshow(gaus_mix_pp[..., 0], cmap=plt.cm.hot)
plt.title('Gaussian mixture,\n first component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 8)
plt.imshow(gaus_mix_pp[..., 1], cmap=plt.cm.hot)
plt.title('Gaussian mixture,\n second component posterior proba.')
plt.colorbar()
plt.subplot(3, 3, 9)
plt.imshow(gaus_mix_pp[..., 2], cmap=plt.cm.hot)
plt.title('Gaussian mixture,\n third component posterior proba.')
plt.colorbar()
###############################################################################
# Fit the null mode of Beta with an empirical normal null
efdr = en.NormalEmpiricalNull(Beta)
emp_null_fdr = efdr.fdr(Beta)
emp_null_fdr = emp_null_fdr.reshape(shape)
plt.subplot(3, 3, 3)
plt.imshow(1 - emp_null_fdr, cmap=plt.cm.hot)
plt.colorbar()
plt.title('Empirical FDR\n ')
plt.show()
| bsd-3-clause |
matthew-brett/draft-statsmodels | scikits/statsmodels/sandbox/rls.py | 1 | 5115 | """Restricted least squares
from pandas
License: Simplified BSD
"""
import numpy as np
from scikits.statsmodels.regression import WLS, GLS, RegressionResults
class RLS(GLS):
"""
Restricted general least squares model that handles linear constraints
Parameters
----------
endog: array-like
n length array containing the dependent variable
exog: array-like
n-by-p array of independent variables
constr: array-like
k-by-p array of linear constraints
param (0.): array-like or scalar
p-by-1 array (or scalar) of constraint parameters
sigma (None): scalar or array-like
The weighting matrix of the covariance. No scaling by default (OLS).
If sigma is a scalar, then it is converted into an n-by-n diagonal
matrix with sigma as each diagonal element.
If sigma is an n-length array, then it is assumed to be a diagonal
matrix with the given sigma on the diagonal (WLS).
Notes
-----
endog = exog * beta + epsilon
weights' * constr * beta = param
See Greene and Seaks, "The Restricted Least Squares Estimator:
A Pedagogical Note", The Review of Economics and Statistics, 1991.
"""
def __init__(self, endog, exog, constr, param=0., sigma=None):
N, Q = exog.shape
constr = np.asarray(constr)
if constr.ndim == 1:
K, P = 1, constr.shape[0]
else:
K, P = constr.shape
if Q != P:
raise Exception('Constraints and design do not align')
self.ncoeffs = Q
self.nconstraint = K
self.constraint = constr
if np.isscalar(param) and K > 1:
param = np.ones((K,)) * param
self.param = param
if sigma is None:
sigma = 1.
if np.isscalar(sigma):
sigma = np.ones(N) * sigma
sigma = np.squeeze(sigma)
if sigma.ndim == 1:
self.sigma = np.diag(sigma)
self.cholsigmainv = np.diag(np.sqrt(sigma))
else:
self.sigma = sigma
self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
super(GLS, self).__init__(endog, exog)
_rwexog = None
@property
def rwexog(self):
"""Whitened exogenous variables augmented with restrictions"""
if self._rwexog is None:
P = self.ncoeffs
K = self.nconstraint
design = np.zeros((P + K, P + K))
design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
constr = np.reshape(self.constraint, (K, P))
design[:P, P:] = constr.T #top right partition
design[P:, :P] = constr #bottom left partition
design[P:, P:] = np.zeros((K, K)) #bottom right partition
self._rwexog = design
return self._rwexog
_inv_rwexog = None
@property
def inv_rwexog(self):
"""Inverse of self.rwexog"""
if self._inv_rwexog is None:
self._inv_rwexog = np.linalg.inv(self.rwexog)
return self._inv_rwexog
_rwendog = None
@property
def rwendog(self):
"""Whitened endogenous variable augmented with restriction parameters"""
if self._rwendog is None:
P = self.ncoeffs
K = self.nconstraint
response = np.zeros((P + K,))
response[:P] = np.dot(self.wexog.T, self.wendog)
response[P:] = self.param
self._rwendog = response
return self._rwendog
_ncp = None
@property
def rnorm_cov_params(self):
"""Parameter covariance under restrictions"""
if self._ncp is None:
P = self.ncoeffs
self._ncp = self.inv_rwexog[:P, :P]
return self._ncp
_wncp = None
@property
def wrnorm_cov_params(self):
"""
Heteroskedasticity-consistent parameter covariance
Used to calculate White standard errors.
"""
if self._wncp is None:
df = self.df_resid
pred = np.dot(self.wexog, self.coeffs)
eps = np.diag((self.wendog - pred) ** 2)
sigmaSq = np.sum(eps)
pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
return self._wncp
_coeffs = None
@property
def coeffs(self):
"""Estimated parameters"""
if self._coeffs is None:
betaLambda = np.dot(self.inv_rwexog, self.rwendog)
self._coeffs = betaLambda[:self.ncoeffs]
return self._coeffs
def fit(self):
rncp = self.wrnorm_cov_params
lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp)
return lfit
if __name__=="__main__":
import scikits.statsmodels as sm
dta = np.genfromtxt('./rlsdata.txt', names=True)
    design = np.column_stack((dta['Y'], dta['Y'] ** 2,
                              dta[['NE', 'NC', 'W', 'S']].view(float).reshape(dta.shape[0], -1)))
    design = sm.add_constant(design, prepend=True)
    rls_mod = RLS(dta['G'], design, constr=[0, 0, 0, 1, 1, 1, 1])
rls_fit = rls_mod.fit()
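    # Minimal inspection sketch (assumes rlsdata.txt was found and the fit
    # succeeded): the constrained estimates and their restricted covariance
    # are available on the model object built above.
    print(rls_mod.coeffs)
    print(rls_mod.rnorm_cov_params)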
| bsd-3-clause |
calliope-project/calliope | calliope/test/test_core_preprocess.py | 1 | 62116 | import pytest
from pytest import approx
import os
import pandas as pd
import numpy as np
import calliope
import calliope.exceptions as exceptions
from calliope.core.attrdict import AttrDict
from calliope.preprocess import time
from calliope.test.common.util import build_test_model as build_model
from calliope.test.common.util import constraint_sets, defaults, check_error_or_warning
class TestModelRun:
def test_model_from_dict(self):
"""
Test creating a model from dict/AttrDict instead of from YAML
"""
this_path = os.path.dirname(__file__)
model_location = os.path.join(this_path, "common", "test_model", "model.yaml")
model_dict = AttrDict.from_yaml(model_location)
node_dict = AttrDict(
{
"nodes": {
"a": {"techs": {"test_supply_elec": {}, "test_demand_elec": {}}},
"b": {"techs": {"test_supply_elec": {}, "test_demand_elec": {}}},
}
}
)
model_dict.union(node_dict)
model_dict.model["timeseries_data_path"] = os.path.join(
this_path, "common", "test_model", model_dict.model["timeseries_data_path"]
)
# test as AttrDict
calliope.Model(model_dict)
# test as dict
calliope.Model(model_dict.as_dict())
@pytest.mark.filterwarnings(
"ignore:(?s).*Not building the link a,b:calliope.exceptions.ModelWarning"
)
def test_valid_scenarios(self):
"""
        Test that a valid scenario definition raises no error and results in the scenario being applied.
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: ['one', 'two']
overrides:
one:
techs.test_supply_gas.constraints.energy_cap_max: 20
two:
techs.test_supply_elec.constraints.energy_cap_max: 20
nodes:
a:
techs:
test_supply_gas:
test_supply_elec:
test_demand_elec:
"""
)
model = build_model(override_dict=override, scenario="scenario_1")
assert (
model._model_run.nodes["a"].techs.test_supply_gas.constraints.energy_cap_max
== 20
)
assert (
model._model_run.nodes[
"a"
].techs.test_supply_elec.constraints.energy_cap_max
== 20
)
def test_invalid_scenarios_dict(self):
"""
Test that invalid scenario definition raises appropriate error
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1:
techs.foo.bar: 1
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario="scenario_1")
assert check_error_or_warning(
error, "Scenario definition must be a list of override names."
)
def test_invalid_scenarios_str(self):
"""
Test that invalid scenario definition raises appropriate error
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
scenario_1: 'foo'
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override, scenario="scenario_1")
assert check_error_or_warning(
error, "Scenario definition must be a list of override names."
)
def test_scenario_name_overlaps_overrides(self):
"""
Test that a scenario name cannot be a combination of override names
"""
override = AttrDict.from_yaml_string(
"""
scenarios:
'simple_supply,one_day': ['simple_supply', 'one_day']
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=override,
scenario="simple_supply,one_day",
)
assert check_error_or_warning(
error,
"Name of a manually defined scenario cannot be a combination of override names.",
)
def test_undefined_carriers(self):
"""
Test that user has input either carrier or carrier_in/_out for each tech
"""
override = AttrDict.from_yaml_string(
"""
techs:
test_undefined_carrier:
essentials:
parent: supply
name: test
constraints:
resource: .inf
energy_cap_max: .inf
nodes.1.techs.test_undefined_carrier:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_conversion_plus_primary_carriers(self):
"""
Test that user has input input/output primary carriers for conversion_plus techs
"""
override1 = {
"techs.test_conversion_plus.essentials.carrier_in": ["gas", "coal"]
}
override2 = {"techs.test_conversion_plus.essentials.primary_carrier_in": "coal"}
override3 = {
"techs.test_conversion_plus.essentials.primary_carrier_out": "coal"
}
model = build_model({}, scenario="simple_conversion_plus,two_hours")
assert (
model._model_run.techs.test_conversion_plus.essentials.get_key(
"primary_carrier_in", None
)
== "gas"
)
# should fail: multiple carriers in, but no primary_carrier_in assigned
with pytest.raises(exceptions.ModelError) as error:
build_model(override1, scenario="simple_conversion_plus,two_hours")
assert check_error_or_warning(error, "Primary_carrier_in must be assigned")
# should fail: primary_carrier_in not one of the carriers_in
with pytest.raises(exceptions.ModelError) as error:
build_model(override2, scenario="simple_conversion_plus,two_hours")
assert check_error_or_warning(error, "Primary_carrier_in `coal` not one")
# should fail: primary_carrier_out not one of the carriers_out
with pytest.raises(exceptions.ModelError) as error:
build_model(override3, scenario="simple_conversion_plus,two_hours")
assert check_error_or_warning(error, "Primary_carrier_out `coal` not one")
def test_incorrect_subset_time(self):
"""
        subset_time must be a list of two entries (start_time, end_time).
        Anything else (wrong number of entries, a bare string, or a range
        outside the input data) should raise an error.
"""
override = lambda param: AttrDict.from_yaml_string(
"model.subset_time: {}".format(param)
)
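        # Local helper: wrap the given value in an override dict that sets
        # model.subset_time.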
# should fail: one string in list
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override(["2005-01"]), scenario="simple_supply")
# should fail: three strings in list
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override(["2005-01-01", "2005-01-02", "2005-01-03"]),
scenario="simple_supply",
)
# should pass: two string in list as slice
model = build_model(
override_dict=override(["2005-01-01", "2005-01-07"]),
scenario="simple_supply",
)
assert all(
model.inputs.timesteps.to_index()
== pd.date_range("2005-01", "2005-01-07 23:00:00", freq="H")
)
# should fail: must be a list, not a string
with pytest.raises(exceptions.ModelError):
model = build_model(
override_dict=override("2005-01"), scenario="simple_supply"
)
# should fail: time subset out of range of input data
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=override(["2005-03", "2005-04"]), scenario="simple_supply"
)
assert check_error_or_warning(
error,
"subset time range ['2005-03', '2005-04'] is outside the input data time range [2005-01-01, 2005-02-01]",
)
# should fail: time subset out of range of input data
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override(["2005-02-01", "2005-02-05"]),
scenario="simple_supply",
)
def test_incorrect_date_format(self):
"""
Test the date parser catches a different date format from file than
user input/default (inc. if it is just one line of a file that is incorrect)
"""
# should pass: changing datetime format from default
override1 = {
"model.timeseries_dateformat": "%d/%m/%Y %H:%M:%S",
"techs.test_demand_heat.constraints.resource": "file=demand_heat_diff_dateformat.csv",
"techs.test_demand_elec.constraints.resource": "file=demand_heat_diff_dateformat.csv",
}
model = build_model(override_dict=override1, scenario="simple_conversion")
assert all(
model.inputs.timesteps.to_index()
== pd.date_range("2005-01", "2005-02-01 23:00:00", freq="H")
)
# should fail: wrong dateformat input for one file
override2 = {
"techs.test_demand_heat.constraints.resource": "file=demand_heat_diff_dateformat.csv"
}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario="simple_conversion")
# should fail: wrong dateformat input for all files
override3 = {"model.timeseries_dateformat": "%d/%m/%Y %H:%M:%S"}
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override3, scenario="simple_supply")
# should fail: one value wrong in file
override4 = {
"techs.test_demand_heat.constraints.resource": "file=demand_heat_wrong_dateformat.csv"
}
# check in output error that it points to: 07/01/2005 10:00:00
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override4, scenario="simple_conversion")
def test_inconsistent_time_indeces(self):
"""
        Test that, including after any time subsetting, the indices of all
        time-varying input data are consistent with each other
"""
# should fail: wrong length of demand_heat csv vs demand_elec
override1 = {
"techs.test_demand_heat.constraints.resource": "file=demand_heat_wrong_length.csv"
}
# check in output error that it points to: 07/01/2005 10:00:00
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_conversion")
# should pass: wrong length of demand_heat csv, but time subsetting removes the difference
build_model(override_dict=override1, scenario="simple_conversion,one_day")
def test_single_timestep(self):
"""
Test that warning is raised on using 1 timestep, that timestep resolution will
be inferred to be 1 hour
"""
override1 = {
"model.subset_time": ["2005-01-01 00:00:00", "2005-01-01 00:00:00"]
}
        # expect a warning that the timestep resolution is inferred to be 1 hour
with pytest.warns(exceptions.ModelWarning) as warn_info:
model = build_model(override_dict=override1, scenario="simple_supply")
assert check_error_or_warning(
warn_info,
"Only one timestep defined. Inferring timestep resolution to be 1 hour",
)
assert model.inputs.timestep_resolution == [1]
def test_empty_key_on_explode(self):
"""
On exploding nodes (from ``'1--3'`` or ``'1,2,3'`` to
``['1', '2', '3']``), raise error on the resulting list being empty
"""
list1 = calliope.preprocess.nodes.explode_nodes("1--3")
list2 = calliope.preprocess.nodes.explode_nodes("1,2,3")
assert list1 == list2 == ["1", "2", "3"]
def test_key_clash_on_set_loc_key(self):
"""
Raise error on attempted overwrite of information regarding a recently
exploded location
"""
override = {
"nodes.a.techs.test_supply_elec.constraints.resource": 10,
"nodes.a,b.techs.test_supply_elec.constraints.resource": 15,
}
with pytest.raises(KeyError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_calculate_depreciation(self):
"""
Technologies which define investment costs *must* define lifetime and
interest rate, so that a depreciation rate can be calculated.
        If lifetime == inf and the interest rate > 0, the depreciation rate will be inf, so
we want to avoid that too.
"""
override1 = {"techs.test_supply_elec.costs.monetary.energy_cap": 10}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override1, scenario="simple_supply,one_day")
assert check_error_or_warning(
error, "Must specify constraints.lifetime and costs.monetary.interest_rate"
)
override2 = {
"techs.test_supply_elec.constraints.lifetime": 10,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override2, scenario="simple_supply,one_day")
assert check_error_or_warning(
error, "Must specify constraints.lifetime and costs.monetary.interest_rate"
)
override3 = {
"techs.test_supply_elec.costs.monetary.interest_rate": 0.1,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override3, scenario="simple_supply,one_day")
assert check_error_or_warning(
error, "Must specify constraints.lifetime and costs.monetary.interest_rate"
)
override4 = {
"techs.test_supply_elec.constraints.lifetime": 10,
"techs.test_supply_elec.costs.monetary.interest_rate": 0,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override4, scenario="simple_supply,one_day")
assert check_error_or_warning(excinfo, "`monetary` interest rate of zero")
override5 = {
"techs.test_supply_elec.constraints.lifetime": np.inf,
"techs.test_supply_elec.costs.monetary.interest_rate": 0,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override5, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"No investment monetary cost will be incurred for `test_supply_elec`",
)
override6 = {
"techs.test_supply_elec.constraints.lifetime": np.inf,
"techs.test_supply_elec.costs.monetary.interest_rate": 0.1,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override6, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"No investment monetary cost will be incurred for `test_supply_elec`",
)
override7 = {
"techs.test_supply_elec.constraints.lifetime": 10,
"techs.test_supply_elec.costs.monetary.interest_rate": 0.1,
"techs.test_supply_elec.costs.monetary.energy_cap": 10,
}
build_model(override_dict=override7, scenario="simple_supply,one_day")
def test_delete_interest_rate(self):
"""
If only 'interest_rate' is given in the cost class for a technology, we
should be able to handle deleting it without leaving an empty cost key.
"""
override1 = {"techs.test_supply_elec.costs.monetary.interest_rate": 0.1}
m = build_model(override_dict=override1, scenario="simple_supply,one_day")
assert "loc_techs_cost" not in m._model_data.dims
def test_empty_cost_class(self):
"""
If cost is defined, but its value is not a dictionary, ensure it is
deleted
"""
override1 = {"techs.test_supply_elec.costs.carbon": None}
with pytest.warns(exceptions.ModelWarning) as warn_info:
m = build_model(
override_dict=override1,
scenario="simple_supply,one_day,investment_costs",
)
assert check_error_or_warning(
warn_info,
"Deleting empty cost class `carbon` for technology `test_supply_elec` at `a`.",
)
assert (
"carbon" not in m._model_run.nodes["b"].techs.test_supply_elec.costs.keys()
)
assert "carbon" not in m._model_data.coords["costs"].values
def test_strip_link(self):
override = {
"links.a, c.techs": {"test_transmission_elec": None},
"nodes.c.techs": {"test_supply_elec": None},
}
m = build_model(override_dict=override, scenario="simple_supply,one_day")
assert "c" in m._model_run.nodes["a"].links.keys()
def test_dataframes_passed(self):
"""
If model config specifies dataframes to be loaded in (via df=...),
these time series must be passed as arguments in calliope.Model(...).
"""
override = {"techs.test_demand_elec.constraints.resource": "df=demand_elec"}
with pytest.raises(exceptions.ModelError) as error:
build_model(
model_file="model_minimal.yaml",
override_dict=override,
timeseries_dataframes=None,
)
assert check_error_or_warning(
error, "no timeseries passed " "as arguments in calliope.Model(...)."
)
def test_dataframe_keys(self):
"""
Any timeseries specified via df=... must correspond to a key in
timeseries_dataframes. An error should be thrown.
"""
override = {"techs.test_demand_elec.constraints.resource": "df=key_1"}
ts_df = {"key_2": pd.DataFrame(np.arange(10))}
with pytest.raises(exceptions.ModelError) as error:
build_model(
model_file="model_minimal.yaml",
override_dict=override,
timeseries_dataframes=ts_df,
)
assert check_error_or_warning(
error, "Model attempted to load dataframe with key"
)
def test_invalid_dataframes_passed(self):
"""
`timeseries_dataframes` should be dict of pandas DataFrames.
"""
override = {"techs.test_demand_elec.constraints.resource": "df=demand_elec"}
ts_df_nodict = pd.DataFrame(np.arange(10)) # Not a dict
ts_df_numpy_arrays = {"demand_elec": np.arange(10)} # No pd DataFrames
for timeseries_dataframes in [ts_df_nodict, ts_df_numpy_arrays]:
with pytest.raises(exceptions.ModelError) as error:
build_model(
model_file="model_minimal.yaml",
override_dict=override,
timeseries_dataframes=timeseries_dataframes,
)
assert check_error_or_warning(
error, "`timeseries_dataframes` must be dict of pandas DataFrames."
)
class TestChecks:
def test_unrecognised_config_keys(self):
"""
        Check that the only permitted top-level keys are 'model', 'run',
        'nodes', 'techs', 'tech_groups' (+ 'config_path', which is an
        internal addition)
"""
override = {"nonsensical_key": "random_string"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised top-level configuration item: nonsensical_key"
)
def test_missing_config_key(self):
"""
Check that missing 'nodes' raises an error
"""
with pytest.raises(exceptions.ModelError) as excinfo:
build_model() # Not selecting any scenario means no nodes are defined
assert check_error_or_warning(
excinfo, "Model is missing required top-level configuration item: nodes"
)
def test_unrecognised_model_run_keys(self):
"""
Check that the only keys allowed in 'model' and 'run' are those in the
model defaults
"""
override1 = {"model.nonsensical_key": "random_string"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override1, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in model configuration: nonsensical_key"
)
override2 = {"run.nonsensical_key": "random_string"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override2, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in run configuration: nonsensical_key"
)
# A key that should be in run but is given in model
override3 = {"model.solver": "glpk"}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override3, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in model configuration: solver"
)
# A key that should be in model but is given in run
override4 = {"run.subset_time": None}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override4, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Unrecognised setting in run configuration: subset_time"
)
def test_warn_null_number_of_spores(self):
"""
Check that spores number is greater than 0 if spores run mode is selected
"""
override = {"run.spores_options.spores_number": 0}
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(scenario="spores,simple_supply", override_dict=override)
assert check_error_or_warning(
warn, "spores run mode is selected, but a number of 0 spores is requested"
)
def test_non_string_score_cost_class(self):
"""
Check that the score_cost_class for spores scoring is a string
"""
override = {"run.spores_options.score_cost_class": 0}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(scenario="spores,simple_supply", override_dict=override)
assert check_error_or_warning(
excinfo, "`run.spores_options.score_cost_class` must be a string"
)
@pytest.mark.parametrize(
"invalid_key", [("monetary"), ("emissions"), ("name"), ("anything_else_really")]
)
def test_unrecognised_tech_keys(self, invalid_key):
"""
Check that no invalid keys are defined for technologies.
"""
override1 = {"techs.test_supply_gas.{}".format(invalid_key): "random_string"}
with pytest.warns(exceptions.ModelWarning):
build_model(override_dict=override1, scenario="simple_supply")
def test_model_version_mismatch(self):
"""
Model config says model.calliope_version = 0.1, which is not what we
are running, so we want a warning.
"""
override = {"model.calliope_version": 0.1}
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, "Model configuration specifies calliope_version"
)
def test_unknown_carrier_tier(self):
"""
User can only use 'carrier_' + ['in', 'out', 'in_2', 'out_2', 'in_3',
'out_3', 'ratios']
"""
override1 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.essentials.carrier_1: power
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_supply,one_day")
override2 = AttrDict.from_yaml_string(
"""
techs.test_conversion_plus.essentials.carrier_out_4: power
"""
)
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override2, scenario="simple_conversion_plus,one_day"
)
def test_name_overlap(self):
"""
No tech may have the same identifier as a tech group
"""
override = AttrDict.from_yaml_string(
"""
techs:
supply:
essentials:
name: Supply tech
carrier: gas
parent: supply
constraints:
energy_cap_max: 10
resource: .inf
nodes:
1.techs.supply:
0.techs.supply:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="one_day")
@pytest.mark.parametrize(
"loc_tech",
(
({"nodes": ["1", "foo"]}),
({"techs": ["test_supply_elec", "bar"]}),
({"nodes": ["1", "foo"], "techs": ["test_supply_elec", "bar"]}),
),
)
@pytest.mark.xfail(reason="Planning to remove group constraints")
def test_inexistent_group_constraint_loc_tech(self, loc_tech):
override = {"group_constraints.mygroup": {"energy_cap_max": 100, **loc_tech}}
with pytest.warns(exceptions.ModelWarning) as excinfo:
m = build_model(override_dict=override, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Possible misspelling in group constraints:"
)
loc_techs = m._model_data.group_constraint_loc_techs_mygroup.values
assert "foo:test_supply_elec" not in loc_techs
assert "1:bar" not in loc_techs
assert "foo:bar" not in loc_techs
@pytest.mark.xfail(reason="Planning to remove group constraints")
def test_inexistent_group_constraint_empty_loc_tech(self):
override = {
"group_constraints.mygroup": {"energy_cap_max": 100, "locs": ["foo"]}
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
m = build_model(override_dict=override, scenario="simple_supply")
assert check_error_or_warning(
excinfo, "Constraint group `mygroup` will be completely ignored"
)
assert m._model_run.group_constraints.mygroup.get("exists", True) is False
@pytest.mark.filterwarnings(
"ignore:(?s).*Not building the link a,b:calliope.exceptions.ModelWarning"
)
def test_abstract_base_tech_group_override(self):
"""
Abstract base technology groups can be overridden
"""
override = AttrDict.from_yaml_string(
"""
tech_groups:
supply:
constraints:
lifetime: 25
nodes:
b.techs.test_supply_elec:
b.techs.test_demand_elec:
"""
)
build_model(override_dict=override, scenario="one_day")
def test_unspecified_parent(self):
"""
All technologies and technology groups must specify a parent
"""
override = AttrDict.from_yaml_string(
"""
techs.test_supply_no_parent:
essentials:
name: Supply tech
carrier: gas
constraints:
energy_cap_max: 10
resource: .inf
nodes.b.techs.test_supply_no_parent:
"""
)
with pytest.raises(KeyError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_tech_as_parent(self):
"""
        Technologies and technology groups must not specify another technology as their parent
"""
override1 = AttrDict.from_yaml_string(
"""
techs.test_supply_tech_parent:
essentials:
name: Supply tech
carrier: gas
parent: test_supply_elec
constraints:
energy_cap_max: 10
resource: .inf
nodes.b.techs.test_supply_tech_parent:
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override1, scenario="simple_supply,one_day")
check_error_or_warning(
error, "tech `test_supply_tech_parent` has another tech as a parent"
)
override2 = AttrDict.from_yaml_string(
"""
tech_groups.test_supply_group:
essentials:
carrier: gas
parent: test_supply_elec
constraints:
energy_cap_max: 10
resource: .inf
techs.test_supply_tech_parent.essentials:
name: Supply tech
parent: test_supply_group
nodes.b.techs.test_supply_tech_parent:
"""
)
with pytest.raises(exceptions.ModelError) as error:
build_model(override_dict=override2, scenario="simple_supply,one_day")
check_error_or_warning(
error, "tech_group `test_supply_group` has a tech as a parent"
)
def test_resource_as_carrier(self):
"""
No carrier in technology or technology group can be called `resource`
"""
override1 = AttrDict.from_yaml_string(
"""
techs:
test_supply_elec:
essentials:
name: Supply tech
carrier: resource
parent: supply
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_supply,one_day")
override2 = AttrDict.from_yaml_string(
"""
tech_groups:
test_supply_group:
essentials:
name: Supply tech
carrier: resource
parent: supply
techs.test_supply_elec.essentials.parent: test_supply_group
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override2, scenario="simple_supply,one_day")
@pytest.mark.filterwarnings(
"ignore: defines force_resource but not a finite resource:calliope.exceptions.ModelWarning"
)
def test_missing_required_constraints(self):
"""
A technology within an abstract base technology must define a subset of
hardcoded constraints in order to function
"""
# should fail: missing one of ['energy_cap_max', 'energy_cap_equals', 'energy_cap_per_unit']
override_supply1 = AttrDict.from_yaml_string(
"""
techs:
demand_missing_constraint:
essentials:
parent: demand
carrier: electricity
name: demand missing constraint
switches:
resource_unit: power
nodes.b.techs.demand_missing_constraint:
"""
)
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override_supply1, scenario="simple_supply,one_day"
)
# should pass: giving one of ['energy_cap_max', 'energy_cap_equals', 'energy_cap_per_unit']
override_supply2 = AttrDict.from_yaml_string(
"""
techs:
supply_missing_constraint:
essentials:
parent: supply
carrier: electricity
name: supply missing constraint
constraints.energy_cap_max: 10
nodes.b.techs.supply_missing_constraint:
"""
)
build_model(override_dict=override_supply2, scenario="simple_supply,one_day")
def test_defining_non_allowed_constraints(self):
"""
A technology within an abstract base technology can only define a subset
        of hardcoded constraints; anything else will not be implemented, so it
        is not allowed for that technology. This includes misspellings
"""
# should fail: storage_cap_max not allowed for supply tech
override_supply1 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.constraints.storage_cap_max: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override_supply1, scenario="simple_supply,one_day"
)
def test_defining_non_allowed_costs(self):
"""
A technology within an abstract base technology can only define a subset
        of hardcoded costs; anything else will not be implemented, so it is
        not allowed for that technology. This includes misspellings
"""
# should fail: storage_cap_max not allowed for supply tech
override = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.costs.monetary.storage_cap: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="simple_supply,one_day")
# should fail: om_prod not allowed for demand tech
override = AttrDict.from_yaml_string(
"""
techs.test_demand_elec.costs.monetary.om_prod: 10
"""
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_defining_cost_class_with_name_of_cost(self):
"""
A cost class with the same name as one of the possible cost types was
defined, suggesting a user mistake with indentation.
"""
override = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.costs.storage_cap: 10
"""
)
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, "`test_supply_elec` at `b` defines storage_cap as a cost class."
)
def test_exporting_unspecified_carrier(self):
"""
User can only define an export carrier if it is defined in
['carrier_out', 'carrier_out_2', 'carrier_out_3']
"""
override_supply = lambda param: AttrDict.from_yaml_string(
"techs.test_supply_elec.constraints.export_carrier: {}".format(param)
)
        override_conversion_plus = lambda param: AttrDict.from_yaml_string(
"techs.test_conversion_plus.constraints.export_carrier: {}".format(param)
)
# should fail: exporting `heat` not allowed for electricity supply tech
with pytest.raises(exceptions.ModelError):
build_model(
override_dict=override_supply("heat"), scenario="simple_supply,one_day"
)
# should fail: exporting `random` not allowed for conversion_plus tech
with pytest.raises(exceptions.ModelError):
build_model(
                override_dict=override_conversion_plus("random"),
scenario="simple_conversion_plus,one_day",
)
# should pass: exporting electricity for supply tech
build_model(
override_dict=override_supply("electricity"),
scenario="simple_supply,one_day",
)
# should pass: exporting heat for conversion tech
build_model(
            override_dict=override_conversion_plus("heat"),
scenario="simple_conversion_plus,one_day",
)
def test_tech_directly_in_nodes(self):
"""
A tech defined directly within a location rather than within techs
inside that location is probably an oversight.
"""
override = {"nodes.b.test_supply_elec.costs.storage_cap": 10}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, "Node `b` contains unrecognised keys ['test_supply_elec']"
)
def test_tech_defined_twice_in_links(self):
"""
A technology can only be defined once for a link, even if that link is
defined twice (i.e. `A,B` and `B,A`).
"""
override = {
"links.a,b.techs.test_transmission_elec": None,
"links.b,a.techs.test_transmission_elec": None,
}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"Technology test_transmission_elec defined twice on a link defined "
"in both directions (e.g. `A,B` and `B,A`)",
)
override = {
"links.a,b.techs": {
"test_transmission_elec": None,
"test_transmission_heat": None,
},
"links.b,a.techs": {
"test_transmission_elec": None,
"test_transmission_heat": None,
},
}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo, ["test_transmission_elec", "test_transmission_heat"]
)
# We do allow a link to be defined twice, so long as the same tech isn't in both
override = {
"techs.test_transmission_heat_2": {
"essentials.name": "Transmission heat tech",
"essentials.carrier": "heat",
"essentials.parent": "transmission",
},
"links.a,b.techs": {"test_transmission_elec": None},
"links.b,a.techs": {"test_transmission_heat_2": None},
}
build_model(override_dict=override, scenario="simple_supply,one_day")
def test_allowed_time_varying_constraints(self):
"""
`file=` is only allowed on a hardcoded list of constraints, unless
`_time_varying` is appended to the constraint (i.e. user input)
"""
allowed_constraints_no_file = list(
set(defaults.tech_groups.storage.allowed_constraints).difference(
defaults.model.file_allowed
)
)
allowed_constraints_file = list(
set(defaults.tech_groups.storage.allowed_constraints).intersection(
defaults.model.file_allowed
)
)
override = lambda param: AttrDict.from_yaml_string(
"techs.test_storage.constraints.{}: file=binary_one_day.csv".format(param)
)
# should fail: Cannot have `file=` on the following constraints
for param in allowed_constraints_no_file:
with pytest.raises(exceptions.ModelError) as errors:
build_model(
override_dict=override(param), scenario="simple_storage,one_day"
)
assert check_error_or_warning(
errors,
"Cannot load data from file for configuration"
" `techs.test_storage.constraints.{}`".format(param),
)
# should pass: can have `file=` on the following constraints
for param in allowed_constraints_file:
build_model(
override_dict=override(param), scenario="simple_storage,one_day"
)
def test_incorrect_node_coordinates(self):
"""
Either all or no nodes must have `coordinates` defined and, if all
defined, they must be in the same coordinate system (lat/lon or x/y)
"""
def _override(param0, param1):
override = {}
if param0 is not None:
override.update({"nodes.a.coordinates": param0})
if param1 is not None:
override.update({"nodes.b.coordinates": param1})
return override
cartesian0 = {"x": 0, "y": 1}
cartesian1 = {"x": 1, "y": 1}
geographic0 = {"lat": 0, "lon": 1}
geographic1 = {"lat": 1, "lon": 1}
fictional0 = {"a": 0, "b": 1}
fictional1 = {"a": 1, "b": 1}
# should fail: cannot have nodes in one place and not in another
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override(cartesian0, None),
scenario="simple_supply,one_day",
)
check_error_or_warning(
error, "Either all or no nodes must have `coordinates` defined"
)
# should fail: cannot have cartesian coordinates in one place and geographic in another
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override(cartesian0, geographic1),
scenario="simple_supply,one_day",
)
check_error_or_warning(error, "All nodes must use the same coordinate format")
# should fail: cannot use a non-cartesian or non-geographic coordinate system
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override(fictional0, fictional1),
scenario="simple_supply,one_day",
)
check_error_or_warning(error, "Unidentified coordinate system")
# should fail: coordinates must be given as key:value pairs
with pytest.raises(exceptions.ModelError) as error:
build_model(
override_dict=_override([0, 1], [1, 1]),
scenario="simple_supply,one_day",
)
check_error_or_warning(error, "Coordinates must be given in the format")
# should pass: cartesian coordinates in both places
build_model(
override_dict=_override(cartesian0, cartesian1),
scenario="simple_supply,one_day",
)
# should pass: geographic coordinates in both places
build_model(
override_dict=_override(geographic0, geographic1),
scenario="simple_supply,one_day",
)
def test_one_way(self):
"""
With one_way transmission, we remove one direction of a link from
loc_tech_carriers_prod and the other from loc_tech_carriers_con.
"""
override = {
"links.X1,N1.techs.heat_pipes.switches.one_way": True,
"links.N1,X2.techs.heat_pipes.switches.one_way": True,
"links.N1,X3.techs.heat_pipes.switches.one_way": True,
"model.subset_time": ["2005-01-01", "2005-01-01"],
}
m = calliope.examples.urban_scale(override_dict=override)
m.run(build_only=True)
removed_prod_links = [
("X1", "heat_pipes:N1"),
("N1", "heat_pipes:X2"),
("N1", "heat_pipes:X3"),
]
removed_con_links = [
("N1", "heat_pipes:X1"),
("X2", "heat_pipes:N1"),
("X3", "heat_pipes:N1"),
]
for link in removed_prod_links:
assert link not in set(i[1:3] for i in m._backend_model.carrier_prod._index)
for link in removed_con_links:
assert link not in set(i[1:3] for i in m._backend_model.carrier_con._index)
def test_carrier_ratio_for_inexistent_carrier(self):
"""
A tech should not define a carrier ratio for a carrier it does
not actually use.
"""
override = AttrDict.from_yaml_string(
"""
nodes.1.techs.test_conversion_plus.constraints.carrier_ratios:
carrier_in:
some_carrier: 1.0
carrier_out_2:
another_carrier: 2.0
"""
)
with pytest.warns(exceptions.ModelWarning) as excinfo:
build_model(
override_dict=override, scenario="simple_conversion_plus,one_day"
)
assert check_error_or_warning(
excinfo,
"Tech `test_conversion_plus` gives a carrier ratio for `another_carrier`, but does not actually",
)
def test_carrier_ratio_for_specified_carrier(self):
"""
        The warning about a carrier ratio being given for a carrier a tech
        does not use should not be triggered if the carrier is in fact defined.
"""
override = AttrDict.from_yaml_string(
"""
nodes.b.techs.test_conversion_plus.constraints.carrier_ratios:
carrier_in:
heat: 1.0
"""
)
with pytest.warns(None) as excinfo:
build_model(
override_dict=override, scenario="simple_conversion_plus,one_day"
)
assert "Tech `test_conversion_plus` gives a carrier ratio" not in [
str(i) for i in excinfo.list
]
def test_carrier_ratio_from_file(self):
"""
It is possible to load a timeseries carrier_ratio from file
"""
override = AttrDict.from_yaml_string(
"""
nodes.b.techs.test_conversion_plus.constraints.carrier_ratios:
carrier_out.heat: file=carrier_ratio.csv
"""
)
with pytest.warns(None) as excinfo:
build_model(
override_dict=override, scenario="simple_conversion_plus,one_day"
)
assert "Cannot load data from file for configuration" not in [
str(i) for i in excinfo.list
]
@pytest.mark.filterwarnings("ignore:(?s).*Integer:calliope.exceptions.ModelWarning")
def test_milp_constraints(self):
"""
If `units` is defined, but not `energy_cap_per_unit`, throw an error
"""
# should fail: no energy_cap_per_unit
override1 = AttrDict.from_yaml_string(
"techs.test_supply_elec.constraints.units_max: 4"
)
with pytest.raises(exceptions.ModelError):
build_model(override_dict=override1, scenario="simple_supply,one_day")
# should pass: energy_cap_per_unit given
override2 = AttrDict.from_yaml_string(
"""
techs.test_supply_elec.constraints:
units_max: 4
energy_cap_per_unit: 5
"""
)
build_model(override_dict=override2, scenario="simple_supply,one_day")
def test_force_resource_ignored(self):
"""
        A technology that defines force_resource but is not in
        loc_techs_finite_resource (i.e. has an infinite resource) should
        raise an error
"""
override = {
"techs.test_supply_elec.constraints.resource": np.inf,
"techs.test_supply_elec.switches.force_resource": True,
}
with pytest.raises(exceptions.ModelError) as excinfo:
build_model(override_dict=override, scenario="simple_supply,one_day")
assert check_error_or_warning(
excinfo,
"Cannot have `force_resource` = True",
)
def test_override_coordinates(self):
"""
Check that warning is raised if we are completely overhauling the
coordinate system with an override
"""
override = {
"nodes": {
"X1.coordinates": {"lat": 51.4596158, "lon": -0.1613446},
"X2.coordinates": {"lat": 51.4652373, "lon": -0.1141548},
"X3.coordinates": {"lat": 51.4287016, "lon": -0.1310635},
"N1.coordinates": {"lat": 51.4450766, "lon": -0.1247183},
},
"links": {
"X1,X2.techs.power_lines.distance": 10,
"X1,X3.techs.power_lines.distance": 5,
"X1,N1.techs.heat_pipes.distance": 3,
"N1,X2.techs.heat_pipes.distance": 3,
"N1,X3.techs.heat_pipes.distance": 4,
},
}
with pytest.warns(exceptions.ModelWarning) as excinfo:
calliope.examples.urban_scale(override_dict=override)
assert check_error_or_warning(excinfo, "Updated from coordinate system")
def test_clustering_and_cyclic_storage(self):
"""
Don't allow time clustering with cyclic storage if not also using
storage_inter_cluster
"""
override = {
"model.subset_time": ["2005-01-01", "2005-01-04"],
"model.time": {
"function": "apply_clustering",
"function_options": {
"clustering_func": "file=cluster_days.csv:0",
"how": "mean",
"storage_inter_cluster": False,
},
},
"run.cyclic_storage": True,
}
with pytest.raises(exceptions.ModelError) as error:
build_model(override, scenario="simple_supply")
assert check_error_or_warning(error, "cannot have cyclic storage")
def test_incorrect_resource_unit(self):
"""
Only `energy`, `energy_per_cap`, or `energy_per_area` is allowed under
        `resource_unit`.
"""
def _override(resource_unit):
return {"techs.test_supply_elec.switches.resource_unit": resource_unit}
with pytest.raises(exceptions.ModelError) as error:
build_model(_override("power"), scenario="simple_supply")
build_model(_override("energy"), scenario="simple_supply")
build_model(_override("energy_per_cap"), scenario="simple_supply")
build_model(_override("energy_per_area"), scenario="simple_supply")
assert check_error_or_warning(
error, "`power` is an unknown resource unit for `test_supply_elec`"
)
@pytest.mark.parametrize(
"constraints,costs",
(
({"units_max": 2, "energy_cap_per_unit": 5}, None),
({"units_equals": 2, "energy_cap_per_unit": 5}, None),
({"units_min": 2, "energy_cap_per_unit": 5}, None),
(None, {"purchase": 2}),
),
)
@pytest.mark.xfail(
reason="Expected fail because now the setting of integer/binary variables is more explicit, so users should be aware without the need of a warning"
)
def test_milp_supply_warning(self, constraints, costs):
override_constraints = {}
override_costs = {}
if constraints is not None:
override_constraints.update(
{"techs.test_supply_elec.constraints": constraints}
)
if costs is not None:
override_costs.update({"techs.test_supply_elec.costs.monetary": costs})
override = {**override_constraints, **override_costs}
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
override_dict=override,
scenario="simple_supply,one_day,investment_costs",
)
assert check_error_or_warning(
warn,
"Integer and / or binary decision variables are included in this model",
)
@pytest.mark.parametrize(
"constraints,costs",
(
(
{"units_max": 2, "storage_cap_per_unit": 5, "energy_cap_per_unit": 5},
None,
),
(
{
"units_equals": 2,
"storage_cap_per_unit": 5,
"energy_cap_per_unit": 5,
},
None,
),
(
{"units_min": 2, "storage_cap_per_unit": 5, "energy_cap_per_unit": 5},
None,
),
(None, {"purchase": 2}),
),
)
@pytest.mark.xfail(
reason="Expected fail because now the setting of integer/binary variables is more explicit, so users should be aware without the need of a warning"
)
def test_milp_storage_warning(self, constraints, costs):
override_constraints = {}
override_costs = {}
if constraints is not None:
override_constraints.update({"techs.test_storage.constraints": constraints})
if costs is not None:
override_costs.update({"techs.test_storage.costs.monetary": costs})
override = {**override_constraints, **override_costs}
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
override_dict=override,
scenario="simple_storage,one_day,investment_costs",
)
assert check_error_or_warning(
warn,
"Integer and / or binary decision variables are included in this model",
)
def test_fail_on_string(self):
with pytest.raises(calliope.exceptions.ModelError) as exception:
build_model(
model_file="weighted_obj_func.yaml",
scenario="illegal_string_cost_class",
)
assert check_error_or_warning(
exception, "`run.objective_options.cost_class` must be a dictionary."
)
def test_warn_on_using_default(self):
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
model_file="weighted_obj_func.yaml",
scenario="emissions_objective_without_removing_monetary_default",
)
assert check_error_or_warning(
warn, "Monetary cost class with a weight of 1 is still included"
)
@pytest.mark.parametrize(
"override",
[
({"run.objective_options.cost_class": {"monetary": None}}),
(
{
"run.objective_options.cost_class": {
"monetary": None,
"emissions": None,
}
}
),
],
)
def test_warn_on_no_weight(self, override):
with pytest.warns(exceptions.ModelWarning) as warn:
model = build_model(
model_file="weighted_obj_func.yaml", override_dict=override
)
assert check_error_or_warning(
warn, "cost class monetary has weight = None, setting weight to 1"
)
assert all(
model.run_config["objective_options"]["cost_class"][i] == 1
for i in override["run.objective_options.cost_class"].keys()
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_storage_initial_fractional_value(self):
"""
Check that the storage_initial value is a fraction
"""
with pytest.raises(exceptions.ModelError) as error:
build_model(
{"techs.test_storage.constraints.storage_initial": 5},
"simple_storage,two_hours,investment_costs",
)
assert check_error_or_warning(
error, "storage_initial values larger than 1 are not allowed."
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_storage_initial_smaller_than_discharge_depth(self):
"""
        Check that the storage_initial value is at least equal to the storage_discharge_depth
"""
with pytest.raises(exceptions.ModelError) as error:
build_model(
{"techs.test_storage.constraints.storage_initial": 0},
"simple_storage,two_hours,investment_costs,storage_discharge_depth",
)
assert check_error_or_warning(
error, "storage_initial is smaller than storage_discharge_depth."
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_storage_inter_cluster_vs_storage_discharge_depth(self):
"""
Check that the storage_inter_cluster is not used together with storage_discharge_depth
"""
with pytest.raises(exceptions.ModelError) as error:
override = {"model.subset_time": ["2005-01-01", "2005-01-04"]}
build_model(override, "clustering,simple_storage,storage_discharge_depth")
assert check_error_or_warning(
error,
"storage_discharge_depth is currently not allowed when time clustering is active.",
)
@pytest.mark.skip(reason="check is now taken care of in typedconfig")
def test_warn_on_undefined_cost_classes(self):
with pytest.warns(exceptions.ModelWarning) as warn:
build_model(
model_file="weighted_obj_func.yaml",
scenario="undefined_class_objective",
)
assert check_error_or_warning(
warn,
"Cost classes `{'random_class'}` are defined in the objective options but not ",
)
class TestUtil:
def test_vincenty(self):
# London to Paris: about 344 km
coords = [(51.507222, -0.1275), (48.8567, 2.3508)]
distance = calliope.preprocess.util.vincenty(coords[0], coords[1])
assert distance == pytest.approx(343834) # in meters
class TestTime:
@pytest.fixture
def model_national(self, load_timeseries_from_dataframes):
"""
Return national scale example model. If load_timeseries_from_dataframes
is True, timeseries are read into dataframes and model is called using them.
If not, the timeseries are read in from CSV.
"""
if load_timeseries_from_dataframes:
# Create dictionary with dataframes
timeseries_data_path = os.path.join(
calliope.examples._PATHS["national_scale"], "timeseries_data/"
)
timeseries_dataframes = {}
timeseries_dataframes["csp_resource"] = pd.read_csv(
os.path.join(timeseries_data_path, "csp_resource.csv"), index_col=0
)
timeseries_dataframes["demand_1"] = pd.read_csv(
os.path.join(timeseries_data_path, "demand-1.csv"), index_col=0
)
timeseries_dataframes["demand_2"] = pd.read_csv(
os.path.join(timeseries_data_path, "demand-2.csv"), index_col=0
)
# Create override dict telling calliope to load timeseries from df
override_dict = {
"techs.csp.constraints.resource": "df=csp_resource",
"nodes.region1.techs.demand_power.constraints.resource": "df=demand_1:demand",
"nodes.region2.techs.demand_power.constraints.resource": "df=demand_2:demand",
}
return calliope.examples.national_scale(
timeseries_dataframes=timeseries_dataframes, override_dict=override_dict
)
else:
return calliope.examples.national_scale()
@pytest.fixture
def model_urban(self):
return calliope.examples.urban_scale(
override_dict={"model.subset_time": ["2005-01-01", "2005-01-10"]}
)
def test_add_max_demand_timesteps(self, model_urban):
data = model_urban._model_data_pre_clustering.copy()
data = time.add_max_demand_timesteps(data)
assert data["max_demand_timesteps"].loc[
dict(carriers="heat")
].values == np.datetime64("2005-01-05T07:00:00")
assert data["max_demand_timesteps"].loc[
dict(carriers="electricity")
].values == np.datetime64("2005-01-10T09:00:00")
@pytest.mark.parametrize("load_timeseries_from_dataframes", [False, True])
def test_timeseries_from_csv(self, model_national):
"""
Timeseries data should be successfully loaded into national_scale example
model. This test checks whether this happens with timeseries loaded both
from CSV (`load_timeseries_from_dataframes`=False, called via file=...) and
from dataframes (`load_timeseries_from_dataframes`=True, called via df=...).
"""
model = model_national
assert model.inputs.resource.loc[("region1", "demand_power")].values[
0
] == approx(-25284.48)
assert model.inputs.resource.loc[("region2", "demand_power")].values[
0
] == approx(-2254.098)
assert model.inputs.resource.loc[("region1-1", "csp")].values[8] == approx(
0.263805
)
assert model.inputs.resource.loc[("region1-2", "csp")].values[8] == approx(
0.096755
)
assert model.inputs.resource.loc[("region1-3", "csp")].values[8] == approx(0.0)
| apache-2.0 |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/patches.py | 10 | 142681 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or
facecolor is not None):
import warnings
warnings.warn("Setting the 'color' property will override"
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
self.set_linewidth(linewidth)
self.set_linestyle(linestyle)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_fill(fill)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
# This is a general version of contains that should work on any
# patch with a path. However, patches that have a faster
# algebraic solution to hit-testing should override this
# method.
if six.callable(self._contains):
return self._contains(self, mouseevent)
if radius is None:
radius = self.get_linewidth()
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
if radius is None:
radius = self.get_linewidth()
return self.get_path().contains_point(point,
self.get_transform(),
radius)
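        # Note (sketch): the point is interpreted in the patch's transformed
        # coordinate system (typically display/pixel coordinates), since
        # get_transform() is applied to the path before the containment test.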
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
self.set_edgecolor(other.get_edgecolor())
self.set_facecolor(other.get_facecolor())
self.set_fill(other.get_fill())
self.set_hatch(other.get_hatch())
self.set_linewidth(other.get_linewidth())
self.set_linestyle(other.get_linestyle())
self.set_transform(other.get_data_transform())
self.set_figure(other.get_figure())
self.set_alpha(other.get_alpha())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
        For example, one may define a patch representing a circle of radius 5
        by providing coordinates for a unit circle, and a transform which
        scales the coordinates (the patch coordinates) by 5.
"""
return transforms.IdentityTransform()
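        # For example (sketch): a subclass whose get_path() returns the unit
        # circle could return transforms.Affine2D().scale(5.0) here, so that
        # the drawn circle has radius 5 in data coordinates.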
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.edgecolor']
self._original_edgecolor = color
self._edgecolor = colors.colorConverter.to_rgba(color, self._alpha)
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
if color is None:
color = mpl.rcParams['patch.facecolor']
self._original_facecolor = color # save: otherwise changing _fill
# may lose alpha information
self._facecolor = colors.colorConverter.to_rgba(color, self._alpha)
if not self._fill:
self._facecolor = list(self._facecolor)
self._facecolor[3] = 0
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha transparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
self.set_facecolor(self._original_facecolor) # using self._fill and
# self._alpha
self.set_edgecolor(self._original_edgecolor)
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
self._linewidth = w
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']
"""
if ls is None:
ls = "solid"
self._linestyle = ls
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self._fill = bool(b)
self.set_facecolor(self._original_facecolor)
def get_fill(self):
'return whether fill is set'
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the patch capstyle
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._capstyle = s
def get_capstyle(self):
"Return the current capstyle"
return self._capstyle
def set_joinstyle(self, s):
"""
Set the patch joinstyle
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._joinstyle = s
def get_joinstyle(self):
"Return the current joinstyle"
return self._joinstyle
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If the same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
"""
self._hatch = hatch
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
@allow_rasterization
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_linestyle(self._linestyle)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.draw_path(gc, tpath, affine, rgbFace)
gc.restore()
renderer.close_group('patch')
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch', 'Patch'):
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
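# Illustrative sketch (not part of the original module): the minimal contract for
# a Patch subclass is to override get_path(); drawing, colors and hit-testing are
# inherited from Patch.  The class name is hypothetical and only meant as a
# usage example.
class _ExampleTriangle(Patch):
    """A fixed unit triangle rendered through the generic Patch machinery."""

    _triangle_path = Path([(0, 0), (1, 0), (0, 1), (0, 0)], closed=True)

    def get_path(self):
        # The only method a concrete Patch subclass must provide.
        return self._triangle_path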
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
If *None*, the shadow will have have the same color as the face,
but darkened.
kwargs are
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r, g, b, a = colors.colorConverter.to_rgba(
self.patch.get_facecolor())
rho = 0.3
r = rho * r
g = rho * g
b = rho * b
self.set_facecolor((r, g, b, 0.5))
self.set_edgecolor((r, g, b, 0.5))
self.set_alpha(0.5)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*angle*
rotation in degrees (anti-clockwise)
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
self._angle = angle
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""
Return the vertices of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x, y, self._angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def contains(self, mouseevent):
# special case the degenerate rectangle
if self._width == 0 or self._height == 0:
return False, {}
x, y = self.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
def set_width(self, w):
"""
Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
xy = property(get_xy, set_xy)
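# Illustrative sketch (not part of the original module): the two calling
# conventions accepted by Rectangle.set_bounds, and the resulting bounding box.
# The function name is hypothetical.
def _example_rectangle_bounds():
    r = Rectangle((0, 0), width=1, height=1)
    r.set_bounds(2, 3, 4, 5)      # four scalars: left, bottom, width, height
    r.set_bounds((2, 3, 4, 5))    # or a single (l, b, w, h) sequence
    return r.get_bbox()           # Bbox with bounds (2, 3, 4, 5)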
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
self._xy = xy
self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, orientation):
self._orientation = orientation
self._update_transform()
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
self._radius = radius
self._update_transform()
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""
Get the path of the polygon
Returns
-------
path : Path
The :class:`~matplotlib.path.Path` object for
the polygon
"""
return self._path
def get_closed(self):
"""
Return whether the polygon is closed
Returns
-------
closed : bool
If the path is closed
"""
return self._closed
def set_closed(self, closed):
"""
Set if the polygon is closed
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
def get_xy(self):
"""
Get the vertices of the path
Returns
-------
vertices : numpy array
The coordinates of the vertices as a Nx2
ndarray.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon
Parameters
----------
xy : numpy array or iterable of pairs
The coordinates of the vertices as a Nx2
ndarray or iterable of pairs.
"""
xy = np.asarray(xy)
if self._closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy) > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
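# Illustrative sketch (not part of the original module): how Polygon.set_xy
# normalizes the stored vertices depending on the *closed* flag.  The function
# name is hypothetical.
def _example_polygon_closure():
    xy = [(0, 0), (1, 0), (0, 1)]
    closed_poly = Polygon(xy, closed=True)   # first vertex is repeated at the end
    open_poly = Polygon(xy, closed=False)    # vertices are stored as given
    return len(closed_poly.get_xy()), len(open_poly.get_xy())   # (4, 3)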
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)" % (self.theta1, self.theta2)
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
def set_radius(self, radius):
self._path = None
self.r = radius
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
def set_width(self, width):
self._path = None
self.width = width
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
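# Illustrative sketch (not part of the original module): passing *width* turns a
# filled circular sector into an annular wedge built from an outer and an inner
# ring, as described in _recompute_path() above.  The function name is
# hypothetical.
def _example_wedge_paths():
    sector = Wedge((0, 0), r=1.0, theta1=0, theta2=60)              # pie slice
    ring = Wedge((0, 0), r=1.0, theta1=0, theta2=360, width=0.25)   # full annulus
    return sector.get_path(), ring.get_path()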
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path([
[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
Draws an arrow starting at (*x*, *y*), with direction and length
given by (*dx*, *dy*); the width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.sqrt(dx ** 2 + dy ** 2) or 1 # account for div by zero
cx = float(dx) / L
sx = float(dy) / L
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Constructor arguments
*width*: float (default: 0.001)
width of full arrow tail
*length_includes_head*: [True | False] (default: False)
True if head is to be counted in calculating the length.
*head_width*: float or None (default: 3*width)
total width of the full arrow head
*head_length*: float or None (default: 1.5 * head_width)
length of arrow head
*shape*: ['full', 'left', 'right'] (default: 'full')
draw the left-half, right-half, or full arrow
*overhang*: float (default: 0)
fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
*head_starts_at_zero*: [True | False] (default: False)
if True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Other valid kwargs (inherited from :class:`Patch`) are:
%(Patch)s
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.sqrt(dx ** 2 + dy ** 2)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = [] # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2.0], # leftmost
[-hl * (1 - hs), -lw / 2.0], # meets stem
[-length, -lw / 2.0], # bottom left
[-length, 0],
])
# if not including the head, shift the arrow along x by the head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
# if the head starts at 0, shift along x by half the head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2.0, 0]
#figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-1],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
cx = float(dx) / distance
sx = float(dy) / distance
M = np.array([[cx, sx], [-sx, cx]])
verts = np.dot(coords, M) + (x + dx, y + dy)
Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
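# Illustrative sketch (not part of the original module): FancyArrow is just a
# Polygon whose outline is computed once in the constructor, so the head
# geometry is fixed at creation time.  The function name is hypothetical.
def _example_fancy_arrow():
    arrow = FancyArrow(0, 0, dx=2.0, dy=1.0, width=0.05,
                       head_width=0.3, head_length=0.4,
                       length_includes_head=True)
    return arrow.get_xy()     # vertices of the complete arrow outline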
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
@docstring.dedent_interpd
def __init__(self, figure, xytip, xybase,
width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
# Set self.figure after Patch.__init__, since it sets self.figure to
# None
self.figure = figure
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width * self.figure.dpi / 72. / 2.
k2 = self.headwidth * self.figure.dpi / 72. / 2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment a fraction *frac* of the distance from the tip to the base
theta = math.atan2(y2 - y1, x2 - x1)
r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(list(zip(xs, ys)), closed=True)
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1, y1, x2, y2, k):
"""
For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
return the points on the line that is perpendicular to the
line and intersects (*x2*, *y2*) and the distance from (*x2*,
*y2*) of the returned points is *k*.
"""
x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
if y2 - y1 == 0:
return x2, y2 + k, x2, y2 - k
elif x2 - x1 == 0:
return x2 + k, y2, x2 - k, y2
m = (y2 - y1) / (x2 - x1)
pm = -1. / m
a = 1
b = -2 * y2
c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3a = (y3a - y2) / pm + x2
y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3b = (y3b - y2) / pm + x2
return x3a, y3a, x3b, y3b
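# Illustrative sketch (not part of the original module): getpoints() returns the
# two points at distance k from (x2, y2) on the line through (x2, y2) that is
# perpendicular to the segment (x1, y1)-(x2, y2).  The function name is
# hypothetical; figure=None is fine here because nothing is drawn.
def _example_yaarrow_getpoints():
    dummy = YAArrow(figure=None, xytip=(0, 0), xybase=(1, 0))
    x3a, y3a, x3b, y3b = dummy.getpoints(0, 0, 1, 0, k=2)
    return (x3a, y3a), (x3b, y3b)    # (1, 2) and (1, -2)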
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)" % self.center
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
Return the vertices of the rectangle
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self, ev):
if ev.x is None or ev.y is None:
return False, {}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x * x + y * y) <= 1.0, {}
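# Illustrative sketch (not part of the original module): the patch transform of
# an Ellipse maps the unit circle onto the ellipse, so transforming (1, 0) gives
# a point on the boundary.  With plain float coordinates no unit conversion is
# involved, so this works even before the patch is added to an Axes.  The
# function name is hypothetical.
def _example_ellipse_transform():
    ell = Ellipse((0.5, 0.5), width=0.4, height=0.2, angle=30.0)
    trans = ell.get_patch_transform()
    return trans.transform_point((1.0, 0.0))   # boundary point in data coords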
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)" % (self.center[0],
self.center[1],
self.radius)
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
Create true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
"""
self.radius = radius
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
def set_radius(self, radius):
"""
Set the radius of the circle
ACCEPTS: float
"""
self.width = self.height = 2 * radius
def get_radius(self):
'return the radius of the circle'
return self.width / 2.
radius = property(get_radius, set_radius)
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
self._path = Path.arc(self.theta1, self.theta2)
@allow_rasterization
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
#self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D * dy) / dr2
y = (-D * dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
# Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = list(six.iterkeys(thetas))
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad),
np.sin(theta1_rad)))
# save original path
path_original = self._path
for theta in thetas:
if inside:
Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
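# Illustrative sketch (not part of the original module): an Arc can only be
# rendered from inside an Axes, where draw() above clips the spline
# approximation to the visible region.  The function name is hypothetical and
# pyplot is imported locally just for the example.
def _example_arc_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    arc = Arc((0.5, 0.5), width=0.8, height=0.4, angle=20.0,
              theta1=0.0, theta2=270.0)
    ax.add_patch(arc)      # Arc.draw() raises RuntimeError without an Axes
    return fig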
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l, b, w, h = bbox.bounds
r = Rectangle(xy=(l, b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None:
r.set_transform(trans)
r.set_clip_on(False)
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
if defaults:
args = [(argname, argdefault)
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av))
for an, av
in args])
#adding ``quotes`` since - and | have special meaning in reST
_table.append([cls.__name__, "``%s``" % name, argstr])
return _pprint_table(_table)
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
where actual styles are declared as subclass of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
# a dictionary of stylename, style class pairs.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
@classmethod
def register(klass, name, style):
"""
Register a new style.
"""
if not issubclass(style, klass._Base):
raise ValueError("%s must be a subclass of %s" % (style,
klass._Base))
klass._style_list[name] = style
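# Illustrative sketch (not part of the original module): _Style.__new__ lets a
# style be requested either as a class, as a name plus keywords, or as a single
# "name, attr=value" string.  BoxStyle is defined just below; the function name
# is hypothetical.
def _example_style_lookup():
    a = BoxStyle.Round(pad=0.2)
    b = BoxStyle("round", pad=0.2)
    c = BoxStyle("round, pad=0.2")
    return type(a) is type(b) is type(c)    # True -- all three are BoxStyle.Round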
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
Following boxstyle classes are defined.
%(AvailableBoxstyles)s
An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
drawn. *mutation_size* determines the overall size of the
mutation (by which I mean the transformation of the rectangle to
the fancy box). *aspect_ratio* determines the aspect-ratio of
the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
The transmute method is the very core of the
:class:`BboxTransmuter` class and must be overridden in the
subclasses. It receives the location and size of the
rectangle, and the mutation_size, with which the amount of
padding and the like will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
- *aspect_ratio* : aspect-ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
# and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, \
height + 2. * pad,
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
#self.pad = pad
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, \
height + 2. * pad,
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding size of edges. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# rounding size. Use a half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = width + 2. * pad - 2 * dr, \
height + 2. * pad - 2 * dr,
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. *pad* if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = width + 2. * pad - tooth_size, \
height + 2. * pad - tooth_size,
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = list(zip(bottom_saw_x, bottom_saw_y)) + \
list(zip(right_saw_x, right_saw_y)) + \
list(zip(top_saw_x, top_saw_y)) + \
list(zip(left_saw_x, left_saw_y)) + \
[(bottom_saw_x[0], bottom_saw_y[0])]
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""A rounded tooth box."""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. *pad* if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([np.array(saw_vertices),
[saw_vertices[0]]], axis=0)
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1) // 2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
_style_list["roundtooth"] = Roundtooth
if __doc__: # __doc__ could be None if -OO optimization is enabled
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableBoxstyles=_pprint_styles(BoxStyle._style_list))
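# Illustrative sketch (not part of the original module): a custom box style only
# has to subclass BoxStyle._Base and implement transmute(); once registered it
# can be used by name like the built-in styles.  The class name and the
# "_example_diamond" style name are hypothetical.
class _ExampleDiamond(BoxStyle._Base):
    """A diamond (rotated square) drawn around the padded box."""

    def __init__(self, pad=0.3):
        self.pad = pad
        super(_ExampleDiamond, self).__init__()

    def transmute(self, x0, y0, width, height, mutation_size):
        pad = mutation_size * self.pad
        width, height = width + 2 * pad, height + 2 * pad
        x0, y0 = x0 - pad, y0 - pad
        cx, cy = x0 + width / 2., y0 + height / 2.
        cp = [(cx, y0), (x0 + width, cy), (cx, y0 + height), (x0, cy), (cx, y0)]
        com = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
        return Path(cp, com)

#BoxStyle.register("_example_diamond", _ExampleDiamond)  # uncomment to register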
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y,
self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
can be a string of the style name with a comma separated
attribute, or an instance of :class:`BoxStyle`. Following box
styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
Old attrs simply are forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
ACCEPTS: %(AvailableBoxstyles)s
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif six.callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
def set_width(self, w):
"""
Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
def set_height(self, h):
"""
Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
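# Illustrative sketch (not part of the original module): FancyBboxPatch pairs a
# plain rectangle with a box style; mutation_scale grows the decoration
# (padding, rounding) without changing the underlying rectangle.  The function
# name is hypothetical.
def _example_fancybbox():
    box = FancyBboxPatch((0.2, 0.2), width=0.6, height=0.3,
                         boxstyle="round,pad=0.05",
                         mutation_scale=2.0)
    return box.get_path()    # the padded, rounded outline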
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
several connectionstyle classes, which is used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
A connectionstyle object can be either created as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
connected. If *patchA* (or *patchB*) is given, the returned path is
clipped so that it starts (or ends) at the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*),
which are given in points.
"""
_style_list = {}
class _Base(object):
"""
A base class for connectionstyle classes. The derived class needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting the two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
Clip the path to the boundaries of patchA and patchB.
The starting point of the path needs to be inside patchA and the
end point inside patchB. The *contains* method of each patch
object is used to test whether the point is inside the patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
Shrink the path by a fixed size (in points) given by shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
try:
left, right = split_path_inout(path, insideA)
path = right
except ValueError:
pass
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
try:
left, right = split_path_inout(path, insideB)
path = left
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
and *posB*. The path is clipped and shrunk.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrinked_path
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ConnectionStyle, self.__class__.__name__),
self.__dict__
)
class Arc3(_Base):
"""
Creates a simple quadratic Bezier curve between two
points. The curve is created so that the middle control point
(C1) is located at the same distance from the start (C0) and
end (C2) points, and the distance of C1 to the line
connecting C0-C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic Bezier curve between two
points. The middle control point is placed at the
intersection of the two lines that cross the start (or
end) point at an angle of angleA (or angleB).
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
math.sin(self.angleA / 180. * math.pi),
cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
math.sin(self.angleB / 180. * math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
Creates a piecewise continuous quadratic Bezier path between
two points. The path has one passing-through point placed at
the intersection of the two lines that cross the start (or
end) point at an angle of angleA (or angleB). The
connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = math.cos(self.angleA / 180. * math.pi),\
math.sin(self.angleA / 180. * math.pi),
cosB, sinB = math.cos(self.angleB / 180. * math.pi),\
math.sin(self.angleB / 180. * math.pi),
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = (dx1 ** 2 + dy1 ** 2) ** .5
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = (dx2 ** 2 + dy2 ** 2) ** .5
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
Creates a piecewise continuous quadratic Bezier path between
two points. The path can have two passing-through points: a
point placed at a distance of armA and an angle of angleA from
point A, and another point defined analogously with respect to
point B. The edges are rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA / 180. * math.pi)
sinA = math.sin(self.angleA / 180. * math.pi)
# x_armA, y_armA
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(self.angleB / 180. * math.pi)
sinB = math.sin(self.angleB / 180. * math.pi)
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
class Bar(_Base):
"""
A line with *angle* between A and B with *armA* and
*armB*. One of the arms is extended so that they are connected at
a right angle. The length of armA is determined by (*armA*
+ *fraction* x AB distance). Same for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
*armA* : minimum length of armA
*armB* : minimum length of armB
*fraction* : a fraction of the distance between two points that
will be added to armA and armB.
*angle* : angle of the connecting line (if None, parallel to A
and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
#angle = self.angle % 180.
#if angle < 0. or angle > 180.:
# angle
#theta0 = (self.angle%180.)/180.*math.pi
theta0 = self.angle / 180. * math.pi
#theta0 = (((self.angle+90)%180.) - 90.)/180.*math.pi
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
#x2, y2 = x2 + dl*ddy, y2 - dl*ddx
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
else:
dl = 0.
#if armA > armB:
# armB = armA + dl
#else:
# armA = armB - dl
arm = max(armA, armB)
f = self.fraction * dd + arm
#fB = self.fraction*dd + armB
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
_style_list["bar"] = Bar
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
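# Worked example (annotation, not original code): with (x0, y0) = (0, 0),
# (x1, y1) = (3, 4) and d = 2.5, the segment length is 5, so ff = 0.5 and
# the returned point is (1.5, 2.0), i.e. halfway from (x0, y0) to (x1, y1).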
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which are used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all their arguments (except self) must have
# default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
assert len(segments) == 2
assert segments[0][1] == Path.MOVETO
assert segments[1][1] == Path.CURVE3
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
class and must be overridden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the arrow head etc.
will be scaled. The linewidth may be used to adjust
the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
boolean. The boolean value indicates whether the path can
be filled or not. The return value can also be a list of paths
and a list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and takes care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrinked = Path(vertices, codes)
# call transmute method with squeezed height.
path_mutated, fillable = self.transmute(path_shrinked,
mutation_size,
linewidth)
if cbook.iterable(fillable):
path_list = []
for p in path_mutated:
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
returned path is simply a concatenation of the original path plus at
most two paths representing the arrow head at the begin point and
at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
true. *head_length* and *head_width* determine the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = \
head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super(ArrowStyle._Curve, self).__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
drawn with capstyle=projected, the arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunk so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = math.sqrt(dx ** 2 + dy ** 2)
# pad_projected : amount of pad to account for the
# overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
if self.beginarrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
ddxA, ddyA = 0., 0.
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
if self.endarrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0.
# this simple code will not work if ddx, ddy is greater than
# the separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if self.beginarrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if self.endarrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__(
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__(
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__(
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__(
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
"""
An arrow with filled triangle head at the begin.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledA, self).__init__(
beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
"""
An arrow with filled triangle head at the end.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledB, self).__init__(
beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
"""
An arrow with filled triangle heads both at the begin and the end
point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledAB, self).__init__(
beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None
):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length,
):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketAB(_Bracket):
"""
An arrow with a bracket(]) at both ends.
"""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketAB, self).__init__(
True, True, widthA=widthA, lengthA=lengthA,
angleA=angleA, widthB=widthB, lengthB=lengthB,
angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
"""
An arrow with a bracket(]) at its end.
"""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
"""
super(ArrowStyle.BracketA, self).__init__(True, None,
widthA=widthA, lengthA=lengthA, angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB, lengthB=lengthB, angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
"""
An arrow with a bar(|) at both ends.
"""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BarAB, self).__init__(
True, True, widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = \
make_wedged_bezier2(arrow_in, head_width / 2.,
wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
#head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
# path for tail
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
# path for the tail start
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
Wedge(?) shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
"""
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return self.__class__.__name__ \
+ "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
else:
return self.__class__.__name__ \
+ "(%s)" % (str(self._path_original),)
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
dpi_cor=1.,
**kwargs):
"""
If *posA* and *posB* are given, a path connecting the two points is
created according to the connectionstyle. The path will be
clipped with *patchA* and *patchB* and further shrunk by
*shrinkA* and *shrinkB*. An arrow is drawn along this
resulting path using the *arrowstyle* parameter. If *path* is
provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
(matplotlib.patches.ConnectionStyle) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
drawn. It can be string of the available arrowstyle names,
with optional comma-separated attributes, or one of the
ArrowStyle instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
self._connetors = None
else:
raise ValueError("either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
Patch.__init__(self, **kwargs)
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
#self._draw_in_display_coordinate = True
def set_dpi_cor(self, dpi_cor):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is not affected by this.
"""
self._dpi_cor = dpi_cor
def get_dpi_cor(self):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is not affected by this.
"""
return self._dpi_cor
def set_positions(self, posA, posB):
""" set the begin end end positions of the connecting
path. Use current vlaue if None.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with
optional comma-separated attributes. Alternatively, the attrs can be
provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif six.callable(connectionstyle):
# we may need to check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available box styles as a list of strings.
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
return the path of the arrow in the data coordinate. Use
get_path_in_displaycoord() method to retrieve the arrow path
in the display coord.
"""
_path, fillable = self.get_path_in_displaycoord()
if cbook.iterable(fillable):
_path = concatenate_paths(_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
#if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_linestyle(self._linestyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
# FIXME : dpi_cor is for the dpi-dependency of the
# linewidth. There could be room for improvement.
#
#dpi_cor = renderer.points_to_pixels(1.)
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not cbook.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
for p, f in zip(path, fillable):
if f:
renderer.draw_path(gc, p, affine, rgbFace)
else:
renderer.draw_path(gc, p, affine, None)
gc.restore()
renderer.close_group('patch')
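# Usage sketch (annotation, not part of the original source): styles can also
# be changed after construction via the setters defined above; `ax` is an
# assumed, already-existing Axes.
# p = FancyArrowPatch((0.1, 0.1), (0.9, 0.9), mutation_scale=20)
# p.set_arrowstyle("fancy", head_width=0.6)
# p.set_connectionstyle("arc3", rad=-0.3)
# ax.add_patch(p)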
class ConnectionPatch(FancyArrowPatch):
"""
A :class:`~matplotlib.patches.ConnectionPatch` is used to make
connecting lines between two points (possibly in different axes).
"""
def __str__(self):
return "ConnectionPatch((%g,%g),(%g,%g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
arrow_transmuter=arrow_transmuter,
connectionstyle=connectionstyle,
connector=connector,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
def _get_xy(self, x, y, s, axes=None):
"""
Calculate the pixel position of the given point.
"""
if axes is None:
axes = self.axes
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s == 'offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi / 72.
y *= dpi / 72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform_point((x, y))
elif s == 'figure points':
# points from the lower left corner of the figure
dpi = self.figure.dpi
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
x *= dpi / 72.
y *= dpi / 72.
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure fraction':
# (0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x, y))
elif s == 'axes points':
# points from the lower left corner of the axes
dpi = self.figure.dpi
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x * dpi / 72.
else:
x = l + x * dpi / 72.
if y < 0:
y = t + y * dpi / 72.
else:
y = b + y * dpi / 72.
return x, y
elif s == 'axes pixels':
#pixels from the lower left corner of the axes
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
else:
x = l + x
if y < 0:
y = t + y
else:
y = b + y
return x, y
elif s == 'axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = axes.transAxes
return trans.transform_point((x, y))
def set_annotation_clip(self, b):
"""
set *annotation_clip* attribute.
* True: the annotation will only be drawn when self.xy is inside the
axes.
* False: the annotation will always be drawn regardless of its
position.
* None: the self.xy will be checked only if *xycoords* is "data"
"""
self._annotation_clip = b
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
x, y = self.xy1
posA = self._get_xy(x, y, self.coords1, self.axesA)
x, y = self.xy2
posB = self._get_xy(x, y, self.coords2, self.axesB)
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
_path, fillable = self.get_arrowstyle()(_path,
self.get_mutation_scale(),
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return _path, fillable
def _check_xy(self, renderer):
"""
Check if the annotation needs to
be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
x, y = self.xy1
xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
if not self.axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
x, y = self.xy2
xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
"""
Draw.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
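# Illustrative usage sketch (annotation, not part of the original module):
# connecting a data point in one Axes to a data point in another; `plt`,
# `fig`, `ax1` and `ax2` are assumptions for the example.
# from matplotlib.patches import ConnectionPatch
# fig, (ax1, ax2) = plt.subplots(1, 2)
# con = ConnectionPatch(xyA=(0.3, 0.4), xyB=(0.6, 0.7),
#                       coordsA="data", coordsB="data",
#                       axesA=ax1, axesB=ax2, arrowstyle="->")
# ax2.add_artist(con)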
| gpl-2.0 |
INMEGEN/MCAC | heatmap_genes_samples.py | 2 | 2795 | import csv
import numpy as np
import matplotlib
matplotlib.use('svg')
import matplotlib.pyplot as plt
from pprint import pprint
genes = ['ABCC8', 'ABCC9', 'ACTC1', 'AKAP9', 'ANK2', 'BAG3', 'CACNA1C', 'CACNA2D1', 'CACNB2', 'CALR3', 'CAMK2A', 'CASQ2',
'CAV3', 'CTD-3222D19.2', 'DES', 'DLG1', 'DPP6', 'DSC3', 'DSG2', 'DTNA', 'GLA', 'HCN4', 'JUP', 'KCNA5', 'KCNH2',
'KCNQ1', 'LDB3', 'LMNA', 'MADD', 'MYBPC3', 'MYH6', 'MYH7', 'MYLK2', 'MYOZ2', 'MYPN', 'NEXN', 'NOS1AP', 'PKP2',
'PRKAG2', 'RANGRF', 'RP11-729I10.2', 'RYR2', 'SCN10A', 'SCN1B', 'SCN5A', 'SLMAP', 'SSUH2', 'TAZ', 'TCAP', 'TNNI3',
'TPM1', 'TRDN', 'TRPM4', 'TTN', 'TTN-AS1', 'VCL', 'WWTR1',]
bwa_mask = "S%02d.bwa_ir_fb.norepeats.intersected.vep.filtered.csv"
directory = "/export/home/rgarcia/MCAC/vcf/intersections/filtered/"
score_threshold = 5
varcount = {}
for sample in range(1, 49):
archivo = directory + bwa_mask % sample
varcount[sample] = {}
with open(archivo, 'rb') as csvfile:
vepreader = csv.reader(csvfile, delimiter='\t')
header = vepreader.next()
for row in vepreader:
if int(row[0]) >= score_threshold:
for gene in genes:
if row[13] == gene:
if gene in varcount[sample]:
varcount[sample][gene].add((row[5], row[15]))
else:
varcount[sample][gene] = set([(row[5], row[15]),])
rows = []
for sample in range(1, 49):
cols = []
for gene in genes:
if gene in varcount[sample]:
heat = len(varcount[sample][gene])
else:
heat = 0
cols.append(heat)
rows.append(cols)
with open('var_count_per_sample_score_gt5.csv', 'wb') as csvfile:
varwriter = csv.writer(csvfile, delimiter=',')
varwriter.writerow(['S'] + genes )
for row in range(len(rows)):
varwriter.writerow([row+1] + rows[row] )
# rows = np.array( rows )
# column_labels = range(1,49)
# row_labels = genes
# data = np.array( rows )
# fig, ax = plt.subplots()
# heatmap = ax.pcolor(data, cmap='summer', picker=True)
# # Format
# fig = plt.gcf()
# fig.set_size_inches(16, 13)
# # put the major ticks at the middle of each cell
# ax.set_xticks(np.arange(data.shape[0])+0.5, minor=False)
# ax.set_yticks(np.arange(data.shape[1])+0.5, minor=False)
# # want a more natural, table-like display
# ax.invert_yaxis()
# ax.xaxis.tick_top()
# ax.set_xticklabels(row_labels, minor=False, fontsize=8)
# ax.set_yticklabels(column_labels, minor=False, fontsize=8)
# # plt.show()
# plt.xticks(rotation=90)
# plt.colorbar(heatmap, orientation="vertical")
# plt.savefig('heatmap.png')
| gpl-3.0 |
pravsripad/mne-python | mne/source_estimate.py | 3 | 127672 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Martin Luessi <[email protected]>
# Mads Jensen <[email protected]>
#
# License: BSD (3-clause)
import contextlib
import copy
import os.path as op
from types import GeneratorType
import numpy as np
from .baseline import rescale
from .cov import Covariance
from .evoked import _get_peak
from .filter import resample
from .io.constants import FIFF
from .surface import (read_surface, _get_ico_surface, mesh_edges,
_project_onto_surface)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces, _get_src_nn,
_import_nibabel, _get_mri_info_data,
_get_atlas_values, _check_volume_labels,
read_freesurfer_lut)
from .transforms import _get_trans, apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, _pl,
_time_mask, warn, copy_function_doc_to_method_doc,
fill_doc, _check_option, _validate_type, _check_src_normal,
_check_stc_units, _check_pandas_installed,
_check_pandas_index_arguments, _convert_times, _ensure_int,
_build_data_frame, _check_time_format, _check_path_like,
sizeof_fmt, object_size)
from .viz import (plot_source_estimates, plot_vector_source_estimates,
plot_volume_source_estimates)
from .io.base import TimeMixin
from .io.meas_info import Info
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
"""Aux Function."""
with open(filename, 'rb') as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc['tmin'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tmin'] /= 1000.0
offset += num_bytes
# read sampling rate in ms
stc['tstep'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tstep'] /= 1000.0
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
# read the source vector
stc['vertices'] = np.frombuffer(buf, dtype=">u4", count=vertices_n,
offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
if (vertices_n and # vertices_n can be 0 (empty stc)
((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.frombuffer(buf, dtype=">f4", count=vertices_n * data_n,
offset=offset)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tobytes())
# write sampling rate in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tobytes())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tobytes())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tobytes())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tobytes())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tobytes())
# close the file
fid.close()
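# Illustrative round-trip sketch (annotation, not part of the module): the
# private helpers above can be exercised directly; the file name and data
# are assumptions for the example.
# import numpy as np
# verts = np.array([0, 5, 9], dtype=np.uint32)
# data = np.random.RandomState(0).randn(3, 4)
# _write_stc('example-lh.stc', tmin=0., tstep=0.001, vertices=verts, data=data)
# stc_dict = _read_stc('example-lh.stc')  # keys: tmin, tstep, vertices, data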
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
Returns
-------
data: dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tobytes())
def _write_w(filename, vertices, data):
"""Write a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename: string
The name of the w file.
vertices: array of int
Vertex indices (0 based).
data: 1D array
The data array (nvert).
"""
assert (len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tobytes())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tobytes())
# close the file
fid.close()
def read_source_estimate(fname, subject=None):
"""Read a source estimate object.
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
single file named '*-vl.stc` or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemisphere with names
following this pattern.
- for vector surface source estimates, only HDF5 files are supported.
- for mixed source estimates, only HDF5 files are supported.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
""" # noqa: E501
fname_arg = fname
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
# make sure corresponding file(s) can be found
ftype = None
if op.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
elif fname.endswith('.h5'):
ftype = 'h5'
fname = fname[:-3]
else:
raise RuntimeError('Unknown extension for file %s' % fname_arg)
if ftype != 'volume':
stc_exist = [op.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [op.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
if all(stc_exist) and ftype != 'w':
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif op.exists(fname + '.h5'):
ftype = 'h5'
elif op.exists(fname + '-stc.h5'):
ftype = 'h5'
fname += '-stc'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
kwargs['vertices'] = [kwargs['vertices']]
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
ftype = 'surface'
elif ftype == 'h5':
kwargs = read_hdf5(fname + '.h5', title='mnepython')
ftype = kwargs.pop('src_type', 'surface')
if isinstance(kwargs['vertices'], np.ndarray):
kwargs['vertices'] = [kwargs['vertices']]
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
if 'subject' not in kwargs:
kwargs['subject'] = subject
if subject is not None and subject != kwargs['subject']:
raise RuntimeError('provided subject name "%s" does not match '
'subject name from the file "%s'
% (subject, kwargs['subject']))
if ftype in ('volume', 'discrete'):
klass = VolVectorSourceEstimate
elif ftype == 'mixed':
klass = MixedVectorSourceEstimate
else:
assert ftype == 'surface'
klass = VectorSourceEstimate
if kwargs['data'].ndim < 3:
klass = klass._scalar_class
return klass(**kwargs)
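# Usage sketch (annotation, not from the original module): reading a surface
# estimate by its file-name stem; the path and subject are assumptions.
# stc = read_source_estimate('sample_audvis-meg', subject='sample')
# # expects 'sample_audvis-meg-lh.stc' and 'sample_audvis-meg-rh.stc' on disk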
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn("src should not be None for a robust guess of stc type.")
else:
warn(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = 'surface'
elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \
and len(vertices) == 1:
src_type = 'volume'
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = 'mixed'
else:
src_type = src.kind
assert src_type in ('surface', 'volume', 'mixed', 'discrete')
return src_type
def _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,
subject=None, vector=False, source_nn=None, warn_text=None):
"""Generate a surface, vector-surface, volume or mixed source estimate."""
def guess_src_type():
return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)
src_type = guess_src_type() if src_type is None else src_type
if vector and src_type == 'surface' and source_nn is None:
raise RuntimeError('No source vectors supplied.')
# infer Klass from src_type
if src_type == 'surface':
Klass = VectorSourceEstimate if vector else SourceEstimate
elif src_type in ('volume', 'discrete'):
Klass = VolVectorSourceEstimate if vector else VolSourceEstimate
elif src_type == 'mixed':
Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate
else:
raise ValueError('vertices has to be either a list with one or more '
'arrays or an array')
# Rotate back for vector source estimates
if vector:
n_vertices = sum(len(v) for v in vertices)
assert data.shape[0] in (n_vertices, n_vertices * 3)
if len(data) == n_vertices:
assert src_type == 'surface' # should only be possible for this
assert source_nn.shape == (n_vertices, 3)
data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]
else:
data = data.reshape((-1, 3, data.shape[-1]))
assert source_nn.shape in ((n_vertices, 3, 3),
(n_vertices * 3, 3))
# This will be an identity transform for volumes, but let's keep
# the code simple and general and just do the matrix mult
data = np.matmul(
np.transpose(source_nn.reshape(n_vertices, 3, 3),
axes=[0, 2, 1]), data)
return Klass(
data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject
)
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine source estimates that do not have '
'the same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(TimeMixin):
_data_ndim = 2
@verbose
def __init__(self, data, vertices, tmin, tstep,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_data_ndim'), self.__class__.__name__
assert hasattr(self, '_src_type'), self.__class__.__name__
assert hasattr(self, '_src_count'), self.__class__.__name__
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel (%s) and sens_data (%s) have invalid '
'dimensions'
% (kernel.shape, sens_data.shape))
if sens_data.ndim != 2:
raise ValueError('The sensor data must have 2 dimensions, got '
'%s' % (sens_data.ndim,))
_validate_type(vertices, list, 'vertices')
if self._src_count is not None:
if len(vertices) != self._src_count:
raise ValueError('vertices must be a list with %d entries, '
'got %s' % (self._src_count, len(vertices)))
vertices = [np.array(v, np.int64) for v in vertices] # makes copy
if any(np.any(np.diff(v) <= 0) for v in vertices):
raise ValueError('Vertices must be ordered in increasing order.')
n_src = sum([len(v) for v in vertices])
# safeguard the user against doing something silly
if data is not None:
if data.ndim not in (self._data_ndim, self._data_ndim - 1):
raise ValueError('Data (shape %s) must have %s dimensions for '
'%s' % (data.shape, self._data_ndim,
self.__class__.__name__))
if data.shape[0] != n_src:
raise ValueError(
f'Number of vertices ({n_src}) and stc.data.shape[0] '
f'({data.shape[0]}) must match')
if self._data_ndim == 3:
if data.shape[1] != 3:
raise ValueError(
'Data for VectorSourceEstimate must have '
'shape[1] == 3, got shape %s' % (data.shape,))
if data.ndim == self._data_ndim - 1: # allow upbroadcasting
data = data[..., np.newaxis]
self._data = data
self._tmin = tmin
self._tstep = tstep
self.vertices = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self._times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
def __repr__(self): # noqa: D105
s = "%d vertices" % (sum(len(v) for v in self.vertices),)
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data shape : %s" % (self.shape,)
sz = sum(object_size(x) for x in (self.vertices + [self.data]))
s += f", ~{sizeof_fmt(sz)}"
return "<%s | %s>" % (type(self).__name__, s)
@fill_doc
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
stc = self.magnitude() if self._data_ndim == 3 else self
if self._n_vertices == 0:
raise RuntimeError('Cannot find peaks with no vertices')
vert_idx, time_idx, _ = _get_peak(
stc.data, self.times, tmin, tmax, mode)
if not vert_as_index:
vert_idx = np.concatenate(self.vertices)[vert_idx]
if not time_as_index:
time_idx = self.times[time_idx]
return vert_idx, time_idx
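# Usage sketch (annotation, not original code): with an existing estimate
# `stc`, the peak vertex and its latency in a time window could be queried as
# vertno, latency = stc.get_peak(tmin=0.05, tmax=0.15, mode='abs')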
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, verbose=verbose)
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct source estimate data.
Parameters
----------
%(baseline_stc)s
            Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The baseline-corrected source estimate object.
Notes
-----
Baseline correction can be done multiple times.
"""
self.data = rescale(self.data, self.times, baseline, copy=False)
return self
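    # Hedged usage sketch for apply_baseline (assumes ``stc`` is an existing
    # source estimate containing pre-stimulus samples; windows illustrative):
    #
    #     stc.apply_baseline(baseline=(None, 0))   # subtract mean up to t=0
    #     stc.apply_baseline(baseline=(-0.2, 0.))  # explicit window in seconds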
@verbose
def save(self, fname, ftype='h5', verbose=None):
"""Save the full source estimate to an HDF5 file.
Parameters
----------
fname : str
The file name to write the source estimate to, should end in
'-stc.h5'.
ftype : str
File format to use. Currently, the only allowed values is "h5".
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
if ftype != 'h5':
raise ValueError('%s objects can only be written as HDF5 files.'
% (self.__class__.__name__,))
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep, subject=self.subject,
src_type=self._src_type),
title='mnepython', overwrite=True)
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=1.0, time_viewer='auto',
subjects_dir=None,
figure=None, views='auto', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, brain_kwargs=None, verbose=None):
brain = plot_source_estimates(
self, subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit, backend=backend,
spacing=spacing, title=title, show_traces=show_traces,
src=src, volume_options=volume_options, view_layout=view_layout,
add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs,
verbose=verbose)
return brain
@property
def sfreq(self):
"""Sample rate of the data."""
return 1. / self.tstep
@property
def _n_vertices(self):
return sum(len(v) for v in self.vertices)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data."""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Restrict SourceEstimate to a time interval.
Parameters
----------
tmin : float | None
The first time point in seconds. If None the first present is used.
tmax : float | None
The last time point in seconds. If None the last present is used.
%(include_tmax)s
Returns
-------
stc : instance of SourceEstimate
The cropped source estimate.
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,
include_tmax=include_tmax)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[..., mask]
else:
self.data = self.data[..., mask]
return self # return self for chaining methods
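    # Hedged usage sketch for crop (``stc`` assumed to exist; times illustrative):
    #
    #     stc.crop(tmin=0., tmax=0.3)  # keep 0-300 ms; operates in place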
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample data.
If appropriate, an anti-aliasing filter is applied before resampling.
See :ref:`resampling-and-decimating` for more information.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : str | tuple
Window to use in resampling. See :func:`scipy.signal.resample`.
%(n_jobs)s
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The resampled source estimate.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
data = self.data
if data.dtype == np.float32:
data = data.astype(np.float64)
self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
return self
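    # Hedged usage sketch for resample (``stc`` assumed to exist; the original
    # rate is inferred from ``stc.tstep``):
    #
    #     stc.resample(100.)   # downsample to 100 Hz in place
    #     stc.tstep            # now 0.01 s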
@property
def data(self):
"""Numpy array of source estimate data."""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@data.setter
def data(self, value):
value = np.asarray(value)
if self._data is not None and value.ndim != self._data.ndim:
raise ValueError('Data array should have %d dimensions.' %
self._data.ndim)
n_verts = sum(len(v) for v in self.vertices)
if value.shape[0] != n_verts:
raise ValueError('The first dimension of the data array must '
'match the number of vertices (%d != %d)' %
(value.shape[0], n_verts))
self._data = value
self._update_times()
@property
def shape(self):
"""Shape of the data."""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
@property
def tmin(self):
"""The first timestamp."""
return self._tmin
@tmin.setter
def tmin(self, value):
self._tmin = float(value)
self._update_times()
@property
def tstep(self):
"""The change in time between two consecutive samples (1 / sfreq)."""
return self._tstep
@tstep.setter
def tstep(self, value):
if value <= 0:
raise ValueError('.tstep must be greater than 0.')
self._tstep = float(value)
self._update_times()
@property
def times(self):
"""A timestamp for each sample."""
return self._times
@times.setter
def times(self, value):
raise ValueError('You cannot write to the .times attribute directly. '
'This property automatically updates whenever '
'.tmin, .tstep or .data changes.')
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep."""
self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
self._times.flags.writeable = False
def __add__(self, a):
"""Add source estimates."""
stc = self.copy()
stc += a
return stc
def __iadd__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data += a.data
else:
self.data += a
return self
def mean(self):
"""Make a summary stc file with mean over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
out = self.sum()
out /= len(self.times)
return out
def sum(self):
"""Make a summary stc file with sum over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[-1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),
vertices=self.vertices, tmin=tmin,
tstep=tstep, subject=self.subject)
return sum_stc
def __sub__(self, a):
"""Subtract source estimates."""
stc = self.copy()
stc -= a
return stc
def __isub__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data -= a.data
else:
self.data -= a
return self
def __truediv__(self, a): # noqa: D105
return self.__div__(a)
def __div__(self, a): # noqa: D105
"""Divide source estimates."""
stc = self.copy()
stc /= a
return stc
def __itruediv__(self, a): # noqa: D105
return self.__idiv__(a)
def __idiv__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data /= a.data
else:
self.data /= a
return self
def __mul__(self, a):
"""Multiply source estimates."""
stc = self.copy()
stc *= a
return stc
def __imul__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data *= a.data
else:
self.data *= a
return self
def __pow__(self, a): # noqa: D105
stc = self.copy()
stc **= a
return stc
def __ipow__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
self.data **= a
return self
def __radd__(self, a): # noqa: D105
return self + a
def __rsub__(self, a): # noqa: D105
return self - a
def __rmul__(self, a): # noqa: D105
return self * a
def __rdiv__(self, a): # noqa: D105
return self / a
def __neg__(self): # noqa: D105
"""Negate the source estimate."""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc.data *= -1
return stc
def __pos__(self): # noqa: D105
return self
def __abs__(self):
"""Compute the absolute value of the data.
Returns
-------
stc : instance of _BaseSourceEstimate
A version of the source estimate, where the data attribute is set
to abs(self.data).
"""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc._data = abs(stc._data)
return stc
def sqrt(self):
"""Take the square root.
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of source estimate instance.
Returns
-------
stc : instance of SourceEstimate
A copy of the source estimate.
"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Return a source estimate object with data summarized over time bins.
        The data are summarized in bins of ``width`` seconds. This method is
        intended for visualization only: no filter is applied before binning,
        so it is not appropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The binned source estimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nt = len(times) - 1
data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[..., i] = func(self.data[..., idx], axis=-1)
tmin = times[0] + width / 2.
stc = self.copy()
stc._data = data
stc.tmin = tmin
stc.tstep = width
return stc
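    # Hedged usage sketch for bin (``stc`` assumed to exist):
    #
    #     stc_bin = stc.bin(0.1)               # mean over 100-ms bins
    #     stc_max = stc.bin(0.1, func=np.max)  # any reducer accepting ``axis``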
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
            Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warn('Performance can be improved by not accessing the data '
'attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. See Notes for details.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : SourceEstimate | VectorSourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
# min and max data indices to include
times = 1000. * self.times
t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
# +1, because upper boundary needs to include the last sample
tmax_idx = t_idx[-1] + 1
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmin = self.times[tmin_idx]
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs.vertices = verts
stcs.data = data_t
stcs.tmin = tmin
return stcs
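    # Hedged usage sketch for transform (assumes ``stc`` is a surface
    # SourceEstimate); any callable that preserves the vertex dimension works:
    #
    #     from functools import partial
    #     stc_abs = stc.transform(np.abs, copy=True)
    #     stc_clip = stc.transform(partial(np.clip, a_min=0., a_max=None),
    #                              copy=True)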
@fill_doc
def to_data_frame(self, index=None, scalings=None,
long_format=False, time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Vertices are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(df_index_evk)s
Defaults to ``None``.
%(df_scalings)s
%(df_longform_stc)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'subject']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
data = self.data.T
times = self.times
# prepare extra columns / multiindex
mindex = list()
default_index = ['time']
if self.subject is not None:
default_index = ['subject', 'time']
mindex.append(('subject', np.repeat(self.subject, data.shape[0])))
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
# triage surface vs volume source estimates
col_names = list()
kinds = ['VOL'] * len(self.vertices)
if isinstance(self, (_BaseSurfaceSourceEstimate,
_BaseMixedSourceEstimate)):
kinds[:2] = ['LH', 'RH']
for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):
col_names.extend(['{}_{}'.format(kind, vert) for vert in vertno])
# build DataFrame
df = _build_data_frame(self, data, None, long_format, mindex, index,
default_index=default_index,
col_names=col_names, col_kind='source')
return df
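    # Hedged usage sketch for to_data_frame (``stc`` assumed to exist):
    #
    #     df = stc.to_data_frame()                    # one column per vertex
    #     df_long = stc.to_data_frame(index='time', long_format=True)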
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
restrict_vertices):
"""Find the center of mass on a surface."""
if (values == 0).all() or (values < 0).any():
raise ValueError('All values must be non-negative and at least one '
'must be non-zero, cannot compute COM')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf = read_surface(op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf))
if restrict_vertices is True:
restrict_vertices = vertices
elif restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif isinstance(restrict_vertices, SourceSpaces):
idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0
restrict_vertices = restrict_vertices[idx]['vertno']
else:
restrict_vertices = np.array(restrict_vertices, int)
pos = surf[0][vertices, :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
return vertex
@fill_doc
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
"""Abstract base class for surface source estimates.
Parameters
----------
data : array
The data in source space.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
data : array
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
_src_type = 'surface'
_src_count = 2
@property
def lh_data(self):
"""Left hemisphere data."""
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
"""Right hemisphere data."""
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
"""Left hemisphere vertno."""
return self.vertices[0]
@property
def rh_vertno(self):
"""Right hemisphere vertno."""
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The source estimate restricted to the given label.
"""
# make sure label and stc are compatible
from .label import Label, BiHemiLabel
_validate_type(label, (Label, BiHemiLabel), 'label')
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
else:
assert label.hemi == 'rh'
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
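    # Hedged usage sketch for in_label (label file path is hypothetical):
    #
    #     label = mne.read_label('sample/label/lh.BA1.label')
    #     stc_label = stc.in_label(label)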
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices.
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError('vertices must be a list')
if not len(self.vertices) == len(vertices):
raise ValueError('vertices must have the same length as '
'stc.vertices')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds),) + self.data.shape[1:])
self.data = np.insert(self.data, inds, new_data, axis=0)
return self
@verbose
def to_original_src(self, src_orig, subject_orig=None,
subjects_dir=None, verbose=None):
"""Get a source estimate from morphed source to the original subject.
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
%(subjects_dir)s
%(verbose_meth)s
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The transformed source estimate.
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError('stc.subject must be set')
src_orig = _ensure_src(src_orig, kind='surface')
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
return self.__class__(self._data[data_idx], vertices,
self.tmin, self.tstep, subject_orig)
@fill_doc
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
_check_option('hemi', hemi, ('lh', 'rh', None))
vertex_offset = 0
if hemi is not None:
if hemi == 'lh':
data = self.lh_data
vertices = [self.lh_vertno, []]
else:
vertex_offset = len(self.vertices[0])
data = self.rh_data
vertices = [[], self.rh_vertno]
meth = self.__class__(
data, vertices, self.tmin, self.tstep).get_peak
else:
meth = super().get_peak
out = meth(tmin=tmin, tmax=tmax, mode=mode,
vert_as_index=vert_as_index,
time_as_index=time_as_index)
if vertex_offset and vert_as_index:
out = (out[0] + vertex_offset, out[1])
return out
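    # Hedged usage sketch for get_peak (``stc`` assumed to exist):
    #
    #     vertno, latency = stc.get_peak(hemi='lh', tmin=0.05, tmax=0.15)
    #     idx, t_idx = stc.get_peak(vert_as_index=True, time_as_index=True)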
@fill_doc
class SourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for surface source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. When it is a single array, the
left hemisphere is stored in data[:len(vertices[0])] and the right
hemisphere is stored in data[-len(vertices[1]):].
When data is a tuple, it contains two arrays:
- "kernel" shape (n_vertices, n_sensors) and
- "sens_data" shape (n_sensors, n_times).
In this case, the source space data corresponds to
``np.dot(kernel, sens_data)``.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
if np.iscomplexobj(self.data):
raise ValueError("Cannot save complex-valued STC data in "
"FIFF format; please set ftype='h5' to save "
"in HDF5 format instead, or cast the data to "
"real numbers before saving.")
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
super().save(fname)
logger.info('[done]')
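    # Hedged usage sketch for save (file stem is hypothetical):
    #
    #     stc.save('run1_audvis')              # writes run1_audvis-lh.stc/-rh.stc
    #     stc.save('run1_audvis', ftype='h5')  # single HDF5 file instead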
@verbose
def estimate_snr(self, info, fwd, cov, verbose=None):
r"""Compute time-varying SNR in the source space.
        This function should only be used with source estimates in units of
        nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).
See also :footcite:`GoldenholzEtAl2009`.
.. warning:: This function currently only works properly for fixed
orientation.
Parameters
----------
        info : instance of Info
The measurement info.
fwd : instance of Forward
The forward solution used to create the source estimate.
cov : instance of Covariance
The noise covariance used to estimate the resting cortical
activations. Should be an evoked covariance, not empty room.
%(verbose)s
Returns
-------
snr_stc : instance of SourceEstimate
The source estimate with the SNR computed.
Notes
-----
We define the SNR in decibels for each source location at each
time point as:
.. math::
            {\rm SNR} = 10\log_{10}[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}]
        where :math:`b_k` is the signal on sensor :math:`k` provided by the
forward model for a source with unit amplitude, :math:`a` is the
source amplitude, :math:`N` is the number of sensors, and
:math:`s_k^2` is the noise variance on sensor :math:`k`.
References
----------
.. footbibliography::
"""
from .forward import convert_forward_solution, Forward
from .minimum_norm.inverse import _prepare_forward
_validate_type(fwd, Forward, 'fwd')
_validate_type(info, Info, 'info')
_validate_type(cov, Covariance, 'cov')
_check_stc_units(self)
if (self.data >= 0).all():
warn('This STC appears to be from free orientation, currently SNR'
' function is valid only for fixed orientation')
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
# G is gain matrix [ch x src], cov is noise covariance [ch x ch]
G, _, _, _, _, _, _, cov, _ = _prepare_forward(
fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,
use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',
allow_fixed_depth=False, limit=None)
G = G['sol']['data']
n_channels = cov['dim'] # number of sensors/channels
b_k2 = (G * G).T
s_k2 = np.diag(cov['data'])
scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)
snr_stc = self.copy()
snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)
return snr_stc
@fill_doc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of activity.
This function computes the spatial center of mass on the surface
as well as the temporal center of mass as in :footcite:`LarsonLee2013`.
.. note:: All activity must occur in a single hemisphere, otherwise
an error is raised. The "mass" of each point in space for
computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in
computing the temporal center of mass. This is useful for
quantifying spatio-temporal cluster locations, especially
when combined with :func:`mne.vertex_to_mni`.
Parameters
----------
subject : str | None
The subject the stc is defined for.
hemi : int, or None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
will come from that array. If instance of SourceSpaces (as of
0.13), the returned vertex will be from the given source space.
            For the most accurate estimates, do not restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
See Also
--------
mne.Label.center_of_mass
mne.vertex_to_mni
References
----------
.. footbibliography::
"""
if not isinstance(surf, str):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
_check_option('hemi', hemi, [0, 1])
vertices = self.vertices[hemi]
values = values[vert_inds[hemi]] # left or right
del vert_inds
vertex = _center_of_mass(
vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
subject=subject, subjects_dir=subjects_dir,
restrict_vertices=restrict_vertices)
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
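    # Hedged usage sketch for center_of_mass (subject name is hypothetical; the
    # data must be non-negative, e.g. a cluster mask or rectified estimate):
    #
    #     vertex, hemi, t = stc.center_of_mass('sample', hemi=0)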
class _BaseVectorSourceEstimate(_BaseSourceEstimate):
_data_ndim = 3
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_scalar_class')
super().__init__(data, vertices, tmin, tstep, subject, verbose)
def magnitude(self):
"""Compute magnitude of activity without directionality.
Returns
-------
stc : instance of SourceEstimate
The source estimate without directionality information.
"""
data_mag = np.linalg.norm(self.data, axis=1)
return self._scalar_class(
data_mag, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
def _get_src_normals(self, src, use_cps):
normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in
zip(src, self.vertices)])
return normals
@fill_doc
def project(self, directions, src=None, use_cps=True):
"""Project the data for each vertex in a given direction.
Parameters
----------
directions : ndarray, shape (n_vertices, 3) | str
Can be:
- ``'normal'``
Project onto the source space normals.
- ``'pca'``
SVD will be used to project onto the direction of maximal
power for each source.
- :class:`~numpy.ndarray`, shape (n_vertices, 3)
Projection directions for each source.
src : instance of SourceSpaces | None
The source spaces corresponding to the source estimate.
Not used when ``directions`` is an array, optional when
``directions='pca'``.
%(use_cps)s
Should be the same value that was used when the forward model
was computed (typically True).
Returns
-------
stc : instance of SourceEstimate
The projected source estimate.
directions : ndarray, shape (n_vertices, 3)
The directions that were computed (or just used).
Notes
-----
When using SVD, there is a sign ambiguity for the direction of maximal
power. When ``src is None``, the direction is chosen that makes the
resulting time waveform sum positive (i.e., have positive amplitudes).
When ``src`` is provided, the directions are flipped in the direction
of the source normals, i.e., outward from cortex for surface source
spaces and in the +Z / superior direction for volume source spaces.
.. versionadded:: 0.21
"""
_validate_type(directions, (str, np.ndarray), 'directions')
_validate_type(src, (None, SourceSpaces), 'src')
if isinstance(directions, str):
_check_option('directions', directions, ('normal', 'pca'),
extra='when str')
if directions == 'normal':
if src is None:
raise ValueError(
'If directions="normal", src cannot be None')
_check_src_normal('normal', src)
directions = self._get_src_normals(src, use_cps)
else:
assert directions == 'pca'
x = self.data
if not np.isrealobj(self.data):
_check_option('stc.data.dtype', self.data.dtype,
(np.complex64, np.complex128))
dtype = \
np.float32 if x.dtype == np.complex64 else np.float64
x = x.view(dtype)
assert x.shape[-1] == 2 * self.data.shape[-1]
u, _, v = np.linalg.svd(x, full_matrices=False)
directions = u[:, :, 0]
# The sign is arbitrary, so let's flip it in the direction that
# makes the resulting time series the most positive:
if src is None:
signs = np.sum(v[:, 0].real, axis=1, keepdims=True)
else:
normals = self._get_src_normals(src, use_cps)
signs = np.sum(directions * normals, axis=1, keepdims=True)
assert signs.shape == (self.data.shape[0], 1)
signs = np.sign(signs)
signs[signs == 0] = 1.
directions *= signs
_check_option(
'directions.shape', directions.shape, [(self.data.shape[0], 3)])
data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]
stc = self._scalar_class(
data_norm, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
return stc, directions
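    # Hedged usage sketch for project (``vec_stc`` and the matching ``src`` are
    # assumed to exist):
    #
    #     stc_normal, directions = vec_stc.project('normal', src=src)
    #     stc_pca, directions = vec_stc.project('pca')
    #     stc_mag = vec_stc.magnitude()   # orientation-free alternative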
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='lateral',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None, volume_options=1.,
view_layout='vertical', add_data_kwargs=None,
brain_kwargs=None, verbose=None): # noqa: D102
return plot_vector_source_estimates(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs, verbose=verbose)
class _BaseVolSourceEstimate(_BaseSourceEstimate):
_src_type = 'volume'
_src_count = None
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot_3d(self, subject=None, surface='white', hemi='both',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=0.1, time_viewer='auto',
subjects_dir=None,
figure=None, views='axial', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, brain_kwargs=None, verbose=None):
return super().plot(
subject=subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure, views=views, colorbar=colorbar, clim=clim,
cortex=cortex, size=size, background=background,
foreground=foreground, initial_time=initial_time,
time_unit=time_unit, backend=backend, spacing=spacing, title=title,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs, verbose=verbose)
@copy_function_doc_to_method_doc(plot_volume_source_estimates)
def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',
bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto',
transparent='auto', show=True, initial_time=None,
initial_pos=None, verbose=None):
data = self.magnitude() if self._data_ndim == 3 else self
return plot_volume_source_estimates(
data, src=src, subject=subject, subjects_dir=subjects_dir,
mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,
clim=clim, transparent=transparent, show=show,
initial_time=initial_time, initial_pos=initial_pos,
verbose=verbose)
# Override here to provide the volume-specific options
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, *,
mri_resolution=True, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(eltc_mri_resolution)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty,
mri_resolution=mri_resolution, verbose=verbose)
@verbose
def in_label(self, label, mri, src, *, verbose=None):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : str | int
The label to use. Can be the name of a label if using a standard
FreeSurfer atlas, or an integer value to extract from the ``mri``.
mri : str
Path to the atlas to use.
src : instance of SourceSpaces
The volumetric source space. It must be a single, whole-brain
volume.
%(verbose_meth)s
Returns
-------
stc : VolSourceEstimate | VolVectorSourceEstimate
The source estimate restricted to the given label.
Notes
-----
.. versionadded:: 0.21.0
"""
if len(self.vertices) != 1:
raise RuntimeError('This method can only be used with whole-brain '
'volume source spaces')
_validate_type(label, (str, 'int-like'), 'label')
if isinstance(label, str):
volume_label = [label]
else:
volume_label = {'Volume ID %s' % (label): _ensure_int(label)}
label = _volume_labels(src, (mri, volume_label), mri_resolution=False)
assert len(label) == 1
label = label[0]
vertices = label.vertices
keep = np.in1d(self.vertices[0], label.vertices)
values, vertices = self.data[keep], [self.vertices[0][keep]]
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : str
The name of the generated nifti file.
src : list
The list of source spaces (should all be of type volume).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
            If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
.. versionadded:: 0.17
Returns
-------
        img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
import nibabel as nib
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,
format=format)
nib.save(img, fname)
def as_volume(self, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Export volume source estimate as a nifti object.
Parameters
----------
src : instance of SourceSpaces
The source spaces (should all be of type volume, or part of a
mixed source space).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
            If True, the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
from .morph import _interpolate_data
data = self.magnitude() if self._data_ndim == 3 else self
return _interpolate_data(data, src, mri_resolution=mri_resolution,
mri_space=True, output=format)
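    # Hedged usage sketch for as_volume / save_as_volume (``vol_stc`` and the
    # volumetric ``src`` are assumed to exist; the file name is hypothetical):
    #
    #     img = vol_stc.as_volume(src, mri_resolution=True)  # nibabel image
    #     vol_stc.save_as_volume('estimate-vl.nii.gz', src)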
@fill_doc
class VolSourceEstimate(_BaseVolSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
if ftype != 'h5' and len(self.vertices) != 1:
raise ValueError('Can only write to .stc or .w if a single volume '
'source space was used, use .h5 instead')
if ftype != 'h5' and self.data.dtype == 'complex':
raise ValueError('Can only write non-complex data to .stc or .w'
', use .h5 instead')
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertices[0], data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertices[0], data=self.data)
elif ftype == 'h5':
super().save(fname, 'h5')
logger.info('[done]')
@fill_doc
class VolVectorSourceEstimate(_BaseVolSourceEstimate,
_BaseVectorSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
_scalar_class = VolSourceEstimate
# defaults differ: hemi='both', views='axial'
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot_3d(self, subject=None, hemi='both', colormap='hot',
time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='axial',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None,
volume_options=1., view_layout='vertical',
add_data_kwargs=None, brain_kwargs=None,
verbose=None): # noqa: D102
return _BaseVectorSourceEstimate.plot(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
brain_kwargs=brain_kwargs, verbose=verbose)
@fill_doc
class VectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseSurfaceSourceEstimate):
"""Container for vector surface source estimates.
    For each vertex, the current is given as a vector with components along
    the X, Y and Z directions.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.15
"""
_scalar_class = SourceEstimate
###############################################################################
# Mixed source estimate (two cortical surfs plus other stuff)
class _BaseMixedSourceEstimate(_BaseSourceEstimate):
_src_type = 'mixed'
_src_count = None
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError('Vertices must be a list of numpy arrays with '
'one array per source space.')
super().__init__(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@property
def _n_surf_vert(self):
return sum(len(v) for v in self.vertices[:2])
def surface(self):
"""Return the cortical surface source estimate.
Returns
-------
stc : instance of SourceEstimate or VectorSourceEstimate
The surface source estimate.
"""
if self._data_ndim == 3:
klass = VectorSourceEstimate
else:
klass = SourceEstimate
return klass(
self.data[:self._n_surf_vert], self.vertices[:2],
self.tmin, self.tstep, self.subject, self.verbose)
def volume(self):
"""Return the volume surface source estimate.
Returns
-------
stc : instance of VolSourceEstimate or VolVectorSourceEstimate
The volume source estimate.
"""
if self._data_ndim == 3:
klass = VolVectorSourceEstimate
else:
klass = VolSourceEstimate
return klass(
self.data[self._n_surf_vert:], self.vertices[2:],
self.tmin, self.tstep, self.subject, self.verbose)
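    # Hedged usage sketch for surface / volume (``mixed_stc`` assumed to exist):
    #
    #     cortical_stc = mixed_stc.surface()   # SourceEstimate part
    #     volumetric_stc = mixed_stc.volume()  # VolSourceEstimate part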
@fill_doc
class MixedSourceEstimate(_BaseMixedSourceEstimate):
"""Container for mixed surface and volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
VolVectorSourceEstimate : A container for Volume vector source estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@fill_doc
class MixedVectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseMixedSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array, shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (n_src,)
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array, shape (n_times,)
The time vector.
    vertices : list of array, shape (n_src,)
        Vertex numbers corresponding to the data.
    data : array of shape (n_dipoles, 3, n_times)
        The data in source space.
    shape : tuple
        The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.21.0
"""
_scalar_class = MixedSourceEstimate
###############################################################################
# Morphing
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]['shape'][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]['vertno']] = True
return mask
def _spatio_temporal_src_adjacency_vol(src, n_times):
from sklearn.feature_extraction import grid_to_graph
mask = _get_vol_mask(src)
edges = grid_to_graph(*mask.shape, mask=mask)
adjacency = _get_adjacency_from_edges(edges, n_times)
return adjacency
def _spatio_temporal_src_adjacency_surf(src, n_times):
if src[0]['use_tris'] is None:
# XXX It would be nice to support non oct source spaces too...
raise RuntimeError("The source space does not appear to be an ico "
"surface. adjacency cannot be extracted from"
" non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off
for u_v, s, off in zip(used_verts, src, offs)])
adjacency = spatio_temporal_tris_adjacency(tris, n_times)
# deal with source space only using a subset of vertices
masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:
raise ValueError('Used vertices do not match adjacency shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warn('%0.1f%% of original source space vertices have been'
' omitted, tri-based adjacency will have holes.\n'
'Consider using distance-based adjacency or '
'morphing data to all source space vertices.' % missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
adjacency = adjacency.tocsr()
adjacency = adjacency[masks]
adjacency = adjacency[:, masks]
# return to original format
adjacency = adjacency.tocoo()
return adjacency
@verbose
def spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):
"""Compute adjacency for a source space activation over time.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N + 1 to 2N are the
        vertices at time 2, etc.
"""
# XXX we should compute adjacency for each source space and then
# use scipy.sparse.block_diag to concatenate them
if src[0]['type'] == 'vol':
if dist is not None:
raise ValueError('dist must be None for a volume '
'source space. Got %s.' % dist)
adjacency = _spatio_temporal_src_adjacency_vol(src, n_times)
elif dist is not None:
# use distances computed and saved in the source space file
adjacency = spatio_temporal_dist_adjacency(src, n_times, dist)
else:
adjacency = _spatio_temporal_src_adjacency_surf(src, n_times)
return adjacency
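# Illustrative sketch (not part of the original module): a typical call to
# ``spatio_temporal_src_adjacency`` when preparing a spatio-temporal cluster
# test. ``src`` is assumed to be an existing SourceSpaces instance (e.g.
# ``fwd['src']`` from a forward solution) and ``n_times`` the number of
# samples in the source estimates being tested.
def _sketch_src_adjacency(src, n_times):
    adjacency = spatio_temporal_src_adjacency(src, n_times=n_times)
    # The result is an (n_vertices * n_times) x (n_vertices * n_times) sparse
    # COO matrix that can be passed to the cluster-level statistics functions.
    return adjacency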
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade.
Parameters
----------
grade : int
Grade of an icosahedral mesh.
%(verbose)s
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_adjacency.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False,
verbose=None):
"""Compute adjacency from triangles and time instants.
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N + 1 to 2N are the
        vertices at time 2, etc.
"""
from scipy import sparse
if remap_vertices:
logger.info('Reassigning vertex indices.')
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris)
edges = (edges + sparse.eye(edges.shape[0], format='csr')).tocoo()
return _get_adjacency_from_edges(edges, n_times)
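# Illustrative sketch (not part of the original module): a minimal toy use of
# ``spatio_temporal_tris_adjacency``. The triangle array below is made up; it
# describes two triangles sharing the edge (1, 2), i.e. four vertices total.
def _sketch_tris_adjacency():
    toy_tris = np.array([[0, 1, 2], [1, 2, 3]])
    adjacency = spatio_temporal_tris_adjacency(toy_tris, n_times=2)
    # 4 vertices x 2 time points -> an 8 x 8 sparse COO matrix, where each
    # vertex is additionally linked to its own copy at the neighboring time.
    return adjacency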
@verbose
def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):
"""Compute adjacency from distances in a source space and time instants.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
n_times : int
Number of time points.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N + 1 to 2N are the
        vertices at time 2, etc.
"""
from scipy.sparse import block_diag as sparse_block_diag
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using '
'setup_source_space with add_dist=True')
blocks = [s['dist'][s['vertno'], :][:, s['vertno']] for s in src]
# Ensure we keep explicit zeros; deal with changes in SciPy
for block in blocks:
if isinstance(block, np.ndarray):
block[block == 0] = -np.inf
else:
            block.data[block.data == 0] = -1
edges = sparse_block_diag(blocks)
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatial_src_adjacency(src, dist=None, verbose=None):
"""Compute adjacency for a source space activation.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_src_adjacency(src, 1, dist)
@verbose
def spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):
"""Compute adjacency from triangles.
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_adjacency(tris, 1, remap_vertices)
@verbose
def spatial_dist_adjacency(src, dist, verbose=None):
"""Compute adjacency from distances in a source space.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_adjacency(src, 1, dist)
@verbose
def spatial_inter_hemi_adjacency(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere.
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
        Typically this should be combined (additively) with another
existing intra-hemispheric adjacency matrix, e.g. computed
using geodesic distances.
"""
from scipy import sparse
from scipy.spatial.distance import cdist
src = _ensure_src(src, kind='surface')
adj = cdist(src[0]['rr'][src[0]['vertno']],
src[1]['rr'][src[1]['vertno']])
adj = sparse.csr_matrix(adj <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in adj.shape]
adj = sparse.vstack([sparse.hstack([empties[0], adj]),
sparse.hstack([adj.T, empties[1]])])
return adj
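# Illustrative sketch (not part of the original module): combining the
# inter-hemisphere adjacency with a standard intra-hemisphere adjacency, as
# suggested in the docstring above. ``src`` is assumed to be a surface (ico)
# SourceSpaces instance and the 5 mm distance is an arbitrary example value.
def _sketch_combined_adjacency(src):
    intra = spatial_src_adjacency(src)                     # within hemispheres
    inter = spatial_inter_hemi_adjacency(src, dist=0.005)  # across the midline
    # Both matrices cover the same vertices, so they can simply be added.
    return (intra + inter).tocoo()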
@verbose
def _get_adjacency_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create adjacency matrix."""
from scipy.sparse import coo_matrix
n_vertices = edges.shape[0]
logger.info("-- number of adjacent vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz))
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
dtype=np.int64)
adjacency = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices,) * 2)
return adjacency
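# Illustrative sketch (not part of the original module): what
# ``_get_adjacency_from_edges`` produces for a tiny hand-made graph. The edge
# matrix below (3 vertices, edges 0-1 and 1-2 plus self-loops) is made up.
def _sketch_adjacency_from_edges():
    from scipy.sparse import coo_matrix
    spatial = coo_matrix(np.array([[1, 1, 0],
                                   [1, 1, 1],
                                   [0, 1, 1]]))
    adj = _get_adjacency_from_edges(spatial, n_times=2)
    # The 3-vertex graph is replicated for both time points and each vertex is
    # linked to its own copy at the neighboring time, giving a 6 x 6 matrix.
    return adj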
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
def _pca_flip(flip, data):
from scipy import linalg
U, s, V = linalg.svd(data, full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = np.linalg.norm(s) / np.sqrt(len(data))
return sign * scale * V[0]
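# Illustrative sketch (not part of the original module): the 'pca_flip'
# aggregation on random data. The flip vector and data below are made up; in
# real use ``flip`` comes from ``label_sign_flip`` and ``data`` holds the time
# courses of the vertices within one label.
def _sketch_pca_flip():
    rng = np.random.RandomState(0)
    data = rng.randn(5, 20)          # 5 vertices, 20 time points
    flip = np.sign(rng.randn(5, 1))  # fake orientation-based sign flips
    # Returns a single representative time course of shape (20,).
    return _pca_flip(flip, data)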
_label_funcs = {
'mean': lambda flip, data: np.mean(data, axis=0),
'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),
'max': lambda flip, data: np.max(np.abs(data), axis=0),
'pca_flip': _pca_flip,
}
@contextlib.contextmanager
def _temporary_vertices(src, vertices):
orig_vertices = [s['vertno'] for s in src]
for s, v in zip(src, vertices):
s['vertno'] = v
try:
yield
finally:
for s, v in zip(src, orig_vertices):
s['vertno'] = v
def _check_stc_src(stc, src):
if stc is not None and src is not None:
for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):
n_missing = (~np.in1d(v, s['vertno'])).sum()
if n_missing:
raise ValueError('%d/%d %s hemisphere stc vertices '
'missing from the source space, likely '
'mismatch' % (n_missing, len(v), hemi))
def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):
"""Prepare indices and flips for extract_label_time_course."""
# If src is a mixed src space, the first 2 src spaces are surf type and
# the other ones are vol type. For mixed source space n_labels will be
# given by the number of ROIs of the cortical parcellation plus the number
# of vol src space.
# If stc=None (i.e. no activation time courses provided) and mode='mean',
# only computes vertex indices and label_flip will be list of None.
from scipy import sparse
from .label import label_sign_flip, Label, BiHemiLabel
# if source estimate provided in stc, get vertices from source space and
# check that they are the same as in the stcs
_check_stc_src(stc, src)
vertno = [s['vertno'] for s in src] if stc is None else stc.vertices
nvert = [len(vn) for vn in vertno]
# initialization
label_flip = list()
label_vertidx = list()
bad_labels = list()
for li, label in enumerate(labels):
if use_sparse:
assert isinstance(label, dict)
vertidx = label['csr']
# This can happen if some labels aren't present in the space
if vertidx.shape[0] == 0:
bad_labels.append(label['name'])
vertidx = None
# Efficiency shortcut: use linearity early to avoid redundant
# calculations
elif mode == 'mean':
vertidx = sparse.csr_matrix(vertidx.mean(axis=0))
label_vertidx.append(vertidx)
label_flip.append(None)
continue
# standard case
_validate_type(label, (Label, BiHemiLabel), 'labels[%d]' % (li,))
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertices = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertices)
elif slabel.hemi == 'rh':
this_vertices = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
this_flip = None
if len(this_vertidx) == 0:
bad_labels.append(label.name)
this_vertidx = None # to later check if label is empty
elif mode not in ('mean', 'max'): # mode-dependent initialization
# label_sign_flip uses two properties:
#
# - src[ii]['nn']
# - src[ii]['vertno']
#
# So if we override vertno with the stc vertices, it will pick
# the correct normals.
with _temporary_vertices(src, stc.vertices):
this_flip = label_sign_flip(label, src[:2])[:, None]
label_vertidx.append(this_vertidx)
label_flip.append(this_flip)
if len(bad_labels):
msg = ('source space does not contain any vertices for %d label%s:\n%s'
% (len(bad_labels), _pl(bad_labels), bad_labels))
if not allow_empty:
raise ValueError(msg)
else:
msg += '\nAssigning all-zero time series.'
if allow_empty == 'ignore':
logger.info(msg)
else:
warn(msg)
return label_vertidx, label_flip
def _vol_src_rr(src):
return apply_trans(
src[0]['src_mri_t'], np.array(
[d.ravel(order='F')
for d in np.meshgrid(
*(np.arange(s) for s in src[0]['shape']),
indexing='ij')],
float).T)
def _volume_labels(src, labels, mri_resolution):
# This will create Label objects that should do the right thing for our
# given volumetric source space when used with extract_label_time_course
from .label import Label
assert src.kind == 'volume'
extra = ' when using a volume source space'
_import_nibabel('use volume atlas labels')
_validate_type(labels, ('path-like', list, tuple), 'labels' + extra)
if _check_path_like(labels):
mri = labels
infer_labels = True
else:
if len(labels) != 2:
raise ValueError('labels, if list or tuple, must have length 2, '
'got %s' % (len(labels),))
mri, labels = labels
infer_labels = False
_validate_type(mri, 'path-like', 'labels[0]' + extra)
logger.info('Reading atlas %s' % (mri,))
vol_info = _get_mri_info_data(str(mri), data=True)
atlas_data = vol_info['data']
atlas_values = np.unique(atlas_data)
if atlas_values.dtype.kind == 'f': # MGZ will be 'i'
atlas_values = atlas_values[np.isfinite(atlas_values)]
if not (atlas_values == np.round(atlas_values)).all():
raise RuntimeError('Non-integer values present in atlas, cannot '
'labelize')
atlas_values = np.round(atlas_values).astype(np.int64)
if infer_labels:
labels = {
k: v for k, v in read_freesurfer_lut()[0].items()
if v in atlas_values}
labels = _check_volume_labels(labels, mri, name='labels[1]')
assert isinstance(labels, dict)
del atlas_values
vox_mri_t = vol_info['vox_mri_t']
want = src[0].get('vox_mri_t', None)
if want is None:
raise RuntimeError(
'Cannot use volumetric atlas if no mri was supplied during '
'source space creation')
vox_mri_t, want = vox_mri_t['trans'], want['trans']
if not np.allclose(vox_mri_t, want, atol=1e-6):
raise RuntimeError(
'atlas vox_mri_t does not match that used to create the source '
'space')
src_shape = tuple(src[0]['mri_' + k] for k in ('width', 'height', 'depth'))
atlas_shape = atlas_data.shape
if atlas_shape != src_shape:
raise RuntimeError('atlas shape %s does not match source space MRI '
'shape %s' % (atlas_shape, src_shape))
atlas_data = atlas_data.ravel(order='F')
if mri_resolution:
# Upsample then just index
out_labels = list()
nnz = 0
interp = src[0]['interpolator']
# should be guaranteed by size checks above and our src interp code
assert interp.shape[0] == np.prod(src_shape)
assert interp.shape == (atlas_data.size, len(src[0]['rr']))
interp = interp[:, src[0]['vertno']]
for k, v in labels.items():
mask = atlas_data == v
csr = interp[mask]
out_labels.append(dict(csr=csr, name=k))
nnz += csr.shape[0] > 0
else:
# Use nearest values
vertno = src[0]['vertno']
rr = _vol_src_rr(src)
del src
src_values = _get_atlas_values(vol_info, rr[vertno])
vertices = [vertno[src_values == val] for val in labels.values()]
out_labels = [Label(v, hemi='lh', name=val)
for v, val in zip(vertices, labels.keys())]
nnz = sum(len(v) != 0 for v in vertices)
logger.info('%d/%d atlas regions had at least one vertex '
'in the source space' % (nnz, len(out_labels)))
return out_labels
def _get_default_label_modes():
return sorted(_label_funcs.keys()) + ['auto']
def _get_allowed_label_modes(stc):
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
return ('mean', 'max', 'auto')
else:
return _get_default_label_modes()
def _gen_extract_label_time_course(stcs, labels, src, *, mode='mean',
allow_empty=False,
mri_resolution=True, verbose=None):
# loop through source estimates and extract time series
from scipy import sparse
if src is None and mode in ['mean', 'max']:
kind = 'surface'
else:
_validate_type(src, SourceSpaces)
kind = src.kind
_check_option('mode', mode, _get_default_label_modes())
if kind in ('surface', 'mixed'):
if not isinstance(labels, list):
labels = [labels]
use_sparse = False
else:
labels = _volume_labels(src, labels, mri_resolution)
use_sparse = bool(mri_resolution)
n_mode = len(labels) # how many processed with the given mode
n_mean = len(src[2:]) if kind == 'mixed' else 0
n_labels = n_mode + n_mean
vertno = func = None
for si, stc in enumerate(stcs):
_validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,),
'source estimate')
_check_option(
'mode', mode, _get_allowed_label_modes(stc),
'when using a vector and/or volume source estimate')
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
mode = 'mean' if mode == 'auto' else mode
else:
mode = 'mean_flip' if mode == 'auto' else mode
if vertno is None:
vertno = copy.deepcopy(stc.vertices) # avoid keeping a ref
nvert = np.array([len(v) for v in vertno])
label_vertidx, src_flip = _prepare_label_extraction(
stc, labels, src, mode, allow_empty, use_sparse)
func = _label_funcs[mode]
# make sure the stc is compatible with the source space
if len(vertno) != len(stc.vertices):
raise ValueError('stc not compatible with source space')
for vn, svn in zip(vertno, stc.vertices):
if len(vn) != len(svn):
raise ValueError('stc not compatible with source space. '
'stc has %s time series but there are %s '
'vertices in source space. Ensure you used '
'src from the forward or inverse operator, '
'as forward computation can exclude vertices.'
% (len(svn), len(vn)))
if not np.array_equal(svn, vn):
raise ValueError('stc not compatible with source space')
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels,) + stc.data.shape[1:],
dtype=stc.data.dtype)
for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):
if vertidx is not None:
if isinstance(vertidx, sparse.csr_matrix):
assert mri_resolution
assert vertidx.shape[1] == stc.data.shape[0]
this_data = np.reshape(stc.data, (stc.data.shape[0], -1))
this_data = vertidx @ this_data
this_data.shape = \
(this_data.shape[0],) + stc.data.shape[1:]
else:
this_data = stc.data[vertidx]
label_tc[i] = func(flip, this_data)
# extract label time series for the vol src space (only mean supported)
offset = nvert[:-n_mean].sum() # effectively :2 or :0
for i, nv in enumerate(nvert[2:]):
if nv != 0:
v2 = offset + nv
label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)
offset = v2
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='auto',
allow_empty=False, return_generator=False,
*, mri_resolution=True, verbose=None):
"""Extract label time course for lists of labels and source estimates.
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter (see Notes).
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
return_generator : bool
If True, a generator instead of a list is returned.
%(eltc_mri_resolution)s
%(verbose)s
Returns
-------
%(eltc_returns)s
Notes
-----
%(eltc_mode_notes)s
If encountering a ``ValueError`` due to mismatch between number of
source points in the subject source space and computed ``stc`` object set
``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source
space is the one actually used by the inverse to compute the source
time courses.
"""
# convert inputs to lists
if not isinstance(stcs, (list, tuple, GeneratorType)):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
label_tc = _gen_extract_label_time_course(
stcs, labels, src, mode=mode, allow_empty=allow_empty,
mri_resolution=mri_resolution)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
        # input was a single SourceEstimate, return a single array
label_tc = label_tc[0]
return label_tc
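# Illustrative sketch (not part of the original module): a typical call to
# ``extract_label_time_course``. ``stcs`` (one or more SourceEstimate
# objects), ``labels`` (e.g. from mne.read_labels_from_annot) and ``inv`` (an
# inverse operator providing ``inv['src']``) are assumed to exist already.
def _sketch_extract_label_tc(stcs, labels, inv):
    label_tc = extract_label_time_course(
        stcs, labels, inv['src'], mode='mean_flip', return_generator=False)
    # For a single stc this is an array of shape (n_labels, n_times); for a
    # list of stcs it is a list of such arrays.
    return label_tc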
@verbose
def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum',
project=True, subjects_dir=None, src=None, verbose=None):
"""Create a STC from ECoG, sEEG and DBS sensor data.
Parameters
----------
evoked : instance of Evoked
The evoked data. Must contain ECoG, sEEG or DBS channels.
%(trans)s
subject : str
The subject name.
distance : float
Distance (m) defining the activation "ball" of the sensor.
mode : str
Can be "sum" to do a linear sum of weights, "nearest" to
use only the weight of the nearest sensor, or "single" to
do a distance-weight of the nearest sensor. Default is "sum".
See Notes.
project : bool
        If True, project the electrodes to the nearest ``'pial'`` surface
vertex before computing distances. Only used when doing a
surface projection.
%(subjects_dir)s
src : instance of SourceSpaces
The source space.
.. warning:: If a surface source space is used, make sure that
``surf='pial'`` was used during construction.
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The surface source estimate. If src is None, a surface source
estimate will be produced, and the number of vertices will equal
the number of pial-surface vertices that were close enough to
        the sensors to take on a non-zero value. If src is not None,
a surface, volume, or mixed source estimate will be produced
(depending on the kind of source space passed) and the
        vertices will match those of src (i.e., there may be
        many all-zero values in stc.data).
Notes
-----
For surface projections, this function projects the ECoG sensors to
the pial surface (if ``project``), then the activation at each pial
surface vertex is given by the mode:
- ``'sum'``
Activation is the sum across each sensor weighted by the fractional
``distance`` from each sensor. A sensor with zero distance gets weight
1 and a sensor at ``distance`` meters away (or larger) gets weight 0.
If ``distance`` is less than the distance between any two electrodes,
this will be the same as ``'nearest'``.
- ``'single'``
Same as ``'sum'`` except that only the nearest electrode is used,
rather than summing across electrodes within the ``distance`` radius.
As ``'nearest'`` for vertices with distance zero to the projected
sensor.
- ``'nearest'``
The value is given by the value of the nearest sensor, up to a
``distance`` (beyond which it is zero).
If creating a Volume STC, ``src`` must be passed in, and this
function will project sEEG and DBS sensors to nearby surrounding vertices.
Then the activation at each volume vertex is given by the mode
in the same way as ECoG surface projections.
.. versionadded:: 0.22
"""
from scipy.spatial.distance import cdist, pdist
from .evoked import Evoked
_validate_type(evoked, Evoked, 'evoked')
_validate_type(mode, str, 'mode')
_validate_type(src, (None, SourceSpaces), 'src')
_check_option('mode', mode, ('sum', 'single', 'nearest'))
# create a copy of Evoked using ecog, seeg and dbs
evoked = evoked.copy().pick_types(ecog=True, seeg=True, dbs=True)
# get channel positions that will be used to pinpoint where
# in the Source space we will use the evoked data
pos = evoked._get_channel_positions()
# remove nan channels
nan_inds = np.where(np.isnan(pos).any(axis=1))[0]
nan_chs = [evoked.ch_names[idx] for idx in nan_inds]
if len(nan_chs):
evoked.drop_channels(nan_chs)
pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds]
# coord_frame transformation from native mne "head" to MRI coord_frame
trans, _ = _get_trans(trans, 'head', 'mri', allow_none=True)
# convert head positions -> coord_frame MRI
pos = apply_trans(trans, pos)
subject = _check_subject(None, subject, False)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if src is None: # fake a full surface one
rrs = [read_surface(op.join(subjects_dir, subject,
'surf', f'{hemi}.pial'))[0]
for hemi in ('lh', 'rh')]
src = SourceSpaces([
dict(rr=rr / 1000., vertno=np.arange(len(rr)), type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI)
for rr in rrs])
del rrs
keep_all = False
else:
keep_all = True
# ensure it's a usable one
klass = dict(
surface=SourceEstimate,
volume=VolSourceEstimate,
mixed=MixedSourceEstimate,
)
_check_option('src.kind', src.kind, sorted(klass.keys()))
klass = klass[src.kind]
rrs = np.concatenate([s['rr'][s['vertno']] for s in src])
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
rrs = apply_trans(trans, rrs)
# projection will only occur with surfaces
logger.info(
f'Projecting data from {len(pos)} sensor{_pl(pos)} onto {len(rrs)} '
f'{src.kind} vertices: {mode} mode')
if project and src.kind == 'surface':
logger.info(' Projecting electrodes onto surface')
pos = _project_onto_surface(pos, dict(rr=rrs), project_rrs=True,
method='nearest')[2]
min_dist = pdist(pos).min() * 1000
logger.info(
f' Minimum {"projected " if project else ""}intra-sensor distance: '
f'{min_dist:0.1f} mm')
# compute pairwise distance between source space points and sensors
dists = cdist(rrs, pos)
assert dists.shape == (len(rrs), len(pos))
# only consider vertices within our "epsilon-ball"
# characterized by distance kwarg
vertices = np.where((dists <= distance).any(-1))[0]
logger.info(f' {len(vertices)} / {len(rrs)} non-zero vertices')
w = np.maximum(1. - dists[vertices] / distance, 0)
# now we triage based on mode
if mode in ('single', 'nearest'):
range_ = np.arange(w.shape[0])
idx = np.argmax(w, axis=1)
vals = w[range_, idx] if mode == 'single' else 1.
w.fill(0)
w[range_, idx] = vals
missing = np.where(~np.any(w, axis=0))[0]
if len(missing):
warn(f'Channel{_pl(missing)} missing in STC: '
f'{", ".join(evoked.ch_names[mi] for mi in missing)}')
nz_data = w @ evoked.data
if not keep_all:
assert src.kind == 'surface'
data = nz_data
offset = len(src[0]['vertno'])
vertices = [vertices[vertices < offset],
vertices[vertices >= offset] - offset]
else:
data = np.zeros(
(sum(len(s['vertno']) for s in src), len(evoked.times)),
dtype=nz_data.dtype)
data[vertices] = nz_data
vertices = [s['vertno'].copy() for s in src]
return klass(data, vertices, evoked.times[0], 1. / evoked.info['sfreq'],
subject=subject, verbose=verbose)
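# Illustrative sketch (not part of the original module): the distance
# weighting used above, reproduced on made-up numbers so the difference
# between the 'sum', 'single' and 'nearest' modes is easy to see.
def _sketch_near_sensor_weights():
    distance = 0.01                           # 10 mm "ball", as in the default
    dists = np.array([[0.002, 0.009],         # vertex 0: both sensors in range
                      [0.004, 0.020]])        # vertex 1: only sensor 0 in range
    w = np.maximum(1. - dists / distance, 0)  # linear fall-off, clipped at 0
    sum_mode = w @ np.ones(2)                 # 'sum': add the weighted sensors
    idx = np.argmax(w, axis=1)
    single_mode = w[np.arange(2), idx]        # 'single': nearest sensor weight
    nearest_mode = (single_mode > 0).astype(float)  # 'nearest': plain 0/1
    return sum_mode, single_mode, nearest_mode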
| bsd-3-clause |
TomAugspurger/pandas | pandas/io/clipboards.py | 5 | 4337 | """ io on the clipboard """
from io import StringIO
import warnings
from pandas.core.dtypes.generic import ABCDataFrame
from pandas import get_option, option_context
def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_csv.
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
**kwargs
See read_csv for the full argument list.
Returns
-------
DataFrame
A parsed DataFrame object.
"""
encoding = kwargs.pop("encoding", "utf-8")
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_csv
text = clipboard_get()
# Try to decode (if needed, as "text" might already be a string here).
try:
text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))
except AttributeError:
pass
# Excel copies into clipboard with \t separation
    # inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split("\n")[:-1][:10]
# Need to remove leading white space, since read_csv
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = {x.lstrip().count("\t") for x in lines}
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = "\t"
# Edge case where sep is specified to be None, return to default
if sep is None and kwargs.get("delim_whitespace") is None:
sep = r"\s+"
# Regex separator currently only works with python engine.
# Default to python if separator is multi-character (regex)
if len(sep) > 1 and kwargs.get("engine") is None:
kwargs["engine"] = "python"
elif len(sep) > 1 and kwargs.get("engine") == "c":
warnings.warn(
"read_clipboard with regex separator does not work properly with c engine"
)
return read_csv(StringIO(text), sep=sep, **kwargs)
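# Illustrative sketch (not part of the original library): the tab-detection
# heuristic used above, applied to a small made-up clipboard payload of the
# kind Excel produces.
def _sketch_tab_detection():
    text = "a\tb\n0\t1\n2\t3\n"
    lines = text[:10000].split("\n")[:-1][:10]
    counts = {x.lstrip().count("\t") for x in lines}
    # Every inspected line carries the same non-zero number of tabs, so the
    # separator would be inferred to be a tab.
    return len(lines) > 1 and len(counts) == 1 and counts.pop() != 0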
def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover
"""
    Attempt to write a text representation of the object to the system clipboard.
    The clipboard can then be pasted into Excel, for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop("encoding", "utf-8")
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace("-", "") != "utf8":
raise ValueError("clipboard only supports utf-8 encoding")
from pandas.io.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = "\t"
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)
text = buf.getvalue()
clipboard_set(text)
return
except TypeError:
warnings.warn(
"to_clipboard in excel mode requires a single character separator."
)
elif sep is not None:
warnings.warn("to_clipboard with excel=False ignores the sep argument")
if isinstance(obj, ABCDataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context("display.max_colwidth", None):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
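# Illustrative sketch (not part of the original library): what the excel=True
# path above serializes, shown without touching the system clipboard. The
# DataFrame below is made up.
def _sketch_excel_mode_payload():
    import pandas as pd
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    buf = StringIO()
    df.to_csv(buf, sep="\t", encoding="utf-8")
    # Tab-separated text; pasting it into Excel fills one cell per value.
    return buf.getvalue()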
| bsd-3-clause |
indhub/mxnet | example/deep-embedded-clustering/dec.py | 20 | 7847 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], int(Y[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
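# Illustrative sketch (not part of the original example): cluster_acc on a
# tiny made-up labeling. The predicted cluster ids are a permutation of the
# true ones, so the Hungarian matching recovers an accuracy of 1.0.
def _sketch_cluster_acc():
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([2, 2, 0, 0, 1, 1])  # same clusters, relabeled
    acc, confusion = cluster_acc(y_pred, y_true)
    return acc  # 1.0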
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
sep = X.shape[0]*9//10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
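# Illustrative sketch (not part of the original example): the soft cluster
# assignment computed in DECLoss.forward, reproduced in plain NumPy on made-up
# embeddings z and centers mu (the Student's t kernel from the DEC paper).
def _sketch_soft_assignment(alpha=1.0):
    z = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])  # 3 embedded points
    mu = np.array([[0.0, 0.0], [5.0, 5.0]])             # 2 cluster centers
    q = 1.0 / (1.0 + cdist(z, mu) ** 2 / alpha)
    q = q ** ((alpha + 1.0) / 2.0)
    q = (q.T / q.sum(axis=1)).T                         # rows sum to 1
    return q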
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| apache-2.0 |
raoulbq/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
alvarofierroclavero/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
thjashin/tensorflow | tensorflow/contrib/learn/__init__.py | 2 | 2110 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedClassifier
@@LinearClassifier
@@LinearRegressor
@@LogisticRegressor
@@SVM
@@SKCompat
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
glennq/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
Alex2114/Deb-3df | tools/bed_compensation.py | 2 | 6057 | """
This script allows plotting the surface of the bed.
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import random
single = True
# With reset of probe
before = [{"Y": 0.0, "X": 0.065, "Z": -0.0020124999999999991}, {"Y": 0.0325, "X": 0.05629, "Z": -0.0014000000000000004}, {"Y": 0.05629, "X": 0.0325, "Z": -0.00064999999999999943}, {"Y": 0.065, "X": 0.0, "Z": 0.00011250000000000027}, {"Y": 0.05629, "X": -0.0325, "Z": 0.00055000000000000025}, {"Y": 0.0325, "X": -0.05629, "Z": 0.00076250000000000016}, {"Y": 0.0, "X": -0.065, "Z": 0.00064999999999999986}, {"Y": -0.0325, "X": -0.05629, "Z": 0.0003875000000000002}, {"Y": -0.05629, "X": -0.0325, "Z": -0.00021250000000000037}, {"Y": -0.065, "X": -0.0, "Z": -0.00089999999999999943}, {"Y": -0.05629, "X": 0.0325, "Z": -0.0017875000000000005}, {"Y": -0.0325, "X": 0.05629, "Z": -0.0022625000000000002}, {"Y": 0.0, "X": 0.04333, "Z": -0.0015125000000000002}, {"Y": 0.021670000000000002, "X": 0.03753, "Z": -0.0011124999999999998}, {"Y": 0.03753, "X": 0.021670000000000002, "Z": -0.00062500000000000001}, {"Y": 0.04333, "X": 0.0, "Z": -0.00010000000000000053}, {"Y": 0.03753, "X": -0.021670000000000002, "Z": 0.00022499999999999964}, {"Y": 0.021670000000000002, "X": -0.03753, "Z": 0.0003500000000000001}, {"Y": 0.0, "X": -0.04333, "Z": 0.00022499999999999964}, {"Y": -0.021670000000000002, "X": -0.03753, "Z": -6.2500000000000001e-05}, {"Y": -0.03753, "X": -0.021670000000000002, "Z": -0.00056249999999999996}, {"Y": -0.04333, "X": -0.0, "Z": -0.0010124999999999993}, {"Y": -0.03753, "X": 0.021670000000000002, "Z": -0.0014500000000000001}, {"Y": -0.021670000000000002, "X": 0.03753, "Z": -0.0016875}, {"Y": 0.0, "X": 0.021670000000000002, "Z": -0.0010625000000000001}, {"Y": 0.01083, "X": 0.018760000000000002, "Z": -0.00087500000000000002}, {"Y": 0.018760000000000002, "X": 0.01083, "Z": -0.00062500000000000001}, {"Y": 0.021670000000000002, "X": 0.0, "Z": -0.00037500000000000001}, {"Y": 0.018760000000000002, "X": -0.01083, "Z": -0.0001875}, {"Y": 0.01083, "X": -0.018760000000000002, "Z": -0.00013749999999999928}, {"Y": 0.0, "X": -0.021670000000000002, "Z": -0.0001875}, {"Y": -0.01083, "X": -0.018760000000000002, "Z": -0.0003875000000000002}, {"Y": -0.018760000000000002, "X": -0.01083, "Z": -0.00062500000000000001}, {"Y": -0.021670000000000002, "X": -0.0, "Z": -0.00085000000000000049}, {"Y": -0.018760000000000002, "X": 0.01083, "Z": -0.0010499999999999997}, {"Y": -0.01083, "X": 0.018760000000000002, "Z": -0.0011249999999999999}, {"Y": 0.0, "X": 0.0, "Z": -0.00059999999999999962}]
after = [{"Y": 0.0, "X": 0.072, "Z": 0.0}, {"Y": 0.06848, "X": 0.02225, "Z": 0.00062500000000000001}, {"Y": 0.04232, "X": -0.05825, "Z": 0.0017000000000000001}, {"Y": -0.04232, "X": -0.05825, "Z": 0.0024625000000000003}, {"Y": -0.06848, "X": 0.02225, "Z": 0.0014125000000000001}, {"Y": 0.0, "X": 0.048, "Z": 0.00041250000000000011}, {"Y": 0.045649999999999996, "X": 0.01483, "Z": 0.00090000000000000041}, {"Y": 0.028210000000000002, "X": -0.038829999999999996, "Z": 0.0017500000000000003}, {"Y": -0.028210000000000002, "X": -0.038829999999999996, "Z": 0.0021000000000000003}, {"Y": -0.045649999999999996, "X": 0.01483, "Z": 0.0014125000000000001}, {"Y": 0.0, "X": 0.024, "Z": 0.00098750000000000031}, {"Y": 0.02283, "X": 0.0074199999999999995, "Z": 0.0012250000000000002}, {"Y": 0.01411, "X": -0.019420000000000003, "Z": 0.0017125}, {"Y": -0.01411, "X": -0.019420000000000003, "Z": 0.0018250000000000002}, {"Y": -0.02283, "X": 0.0074199999999999995, "Z": 0.0014500000000000001}, {"Y": 0.0, "X": 0.0, "Z": 0.0015000000000000002}]
mat = np.array(
[[ 1. , 0., 0.0193759/2 ],
[ 0., 1., -0.00773554/2],
[-0.01900754/2 ,0.00779585/2 ,1. ]])
mat = ([[ 9.99622303e-01, 1.82391137e-04, -1.96197371e-02],
[ 1.77231385e-04, 9.99914414e-01, 9.20640478e-03],
[ 1.92419604e-02, -9.29200211e-03, 9.99536717e-01]])
mat = np.array([[ 1., 0. , 0.020978/2 ],
[ 0. , 1. , -0.00924437/2],
[-0.02054686/2 , 0.00933064/2 ,1. ]])
x1, y1, z1 = map(list, zip(*map(lambda d: tuple(np.array([d['X']*1000, d['Y']*1000, d['Z']*1000])) , before)))
#x2, y2, z2 = map(list, zip(*map(lambda d: (d['X']*1000, d['Y']*1000, d['Z']*1000) , after )))
x2, y2, z2 = map(list, zip(*map(lambda d: tuple(np.array([d['X']*1000, d['Y']*1000, 0*1000]).dot(np.linalg.inv(mat))) , before )))
fig = plt.figure()
if single:
ax = fig.add_subplot(111, projection='3d')
else:
ax = fig.add_subplot(211, projection='3d')
(x, y, z) = (x1, y1, z1)
# Set up the canonical least squares form
degree = 2
Ax = np.vander(x, degree)
Ay = np.vander(y, degree)
A = np.hstack((Ax, Ay))
A = np.column_stack((np.ones(len(x)), x, y))
# Solve for a least squares estimate
(coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, z)
X = np.linspace(min(x), max(x), 3)
Y = np.linspace(min(y), max(y), 3)
X, Y = np.meshgrid(X, Y)
Z = coeffs[0]+coeffs[1]*X + coeffs[2]*Y
ax.plot(x1, y1, z1, linestyle="none", marker="o", mfc="none", markeredgecolor="red")
if single:
ax.plot(x2, y2, z2, linestyle="none", marker="o", mfc="none", markeredgecolor="green")
ax.plot_surface(X, Y, Z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if not single:
ax = fig.add_subplot(212, projection='3d')
(x, y, z) = (x2, y2, z2)
# Set up the canonical least squares form
degree = 2
Ax = np.vander(x, degree)
Ay = np.vander(y, degree)
A = np.hstack((Ax, Ay))
A = np.column_stack((np.ones(len(x)), x, y))
# Solve for a least squares estimate
(coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, z)
X = np.linspace(min(x), max(x), 3)
Y = np.linspace(min(y), max(y), 3)
X, Y = np.meshgrid(X, Y)
Z = coeffs[0]+coeffs[1]*X + coeffs[2]*Y
ax.plot_surface(X, Y, Z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
print "Before delta: "+str(max(z1)-min(z1))
print "After delta: "+str(max(z2)-min(z2))
plt.show()
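# Illustrative sketch (not part of the original script): the least-squares
# plane fit used above, on synthetic data, making the design matrix
# [1, x, y] -> z = c0 + c1*x + c2*y explicit. All numbers are made up.
def _sketch_plane_fit():
    rng = np.random.RandomState(0)
    xs = rng.uniform(-50, 50, 30)
    ys = rng.uniform(-50, 50, 30)
    zs = 0.5 + 0.01 * xs - 0.02 * ys + rng.normal(0, 0.01, 30)
    A = np.column_stack((np.ones(len(xs)), xs, ys))
    coeffs, residuals, rank, sing_vals = np.linalg.lstsq(A, zs)
    return coeffs  # approximately [0.5, 0.01, -0.02]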
| gpl-3.0 |
ebressert/ScipyNumpy_book_examples | python_examples/scipy_351_ex1.py | 2 | 1579 | import numpy as np
from scipy.cluster import vq
import matplotlib.pyplot as plt
# Creating data
c1 = np.random.randn(100, 2) + 5
c2 = np.random.randn(30, 2) - 5
c3 = np.random.randn(50, 2)
# Pooling all the data into one 150 x 2 array
data = np.vstack([c1, c2, c3])
# Calculating the cluster centriods and variance
# from kmeans
centroids, variance = vq.kmeans(data, 3)
# The identified variable contains the information
# we need to separate the points in clusters
# based on the vq function.
identified, distance = vq.vq(data, centroids)
# Retrieving coordinates for points in each vq
# identified core
vqc1 = data[identified == 0]
vqc2 = data[identified == 1]
vqc3 = data[identified == 2]
#Setting up plot details
x1, x2 = -10, 10
y1, y2 = -10, 10
fig = plt.figure()
fig.subplots_adjust(hspace=0.1, wspace=0.1)
ax1 = fig.add_subplot(121, aspect='equal')
ax1.scatter(c1[:, 0], c1[:, 1], lw=0.5, color='#00CC00')
ax1.scatter(c2[:, 0], c2[:, 1], lw=0.5, color='#028E9B')
ax1.scatter(c3[:, 0], c3[:, 1], lw=0.5, color='#FF7800')
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
ax1.set_xlim(x1, x2)
ax1.set_ylim(y1, y2)
ax1.text(-9, 8, 'Original')
ax2 = fig.add_subplot(122, aspect='equal')
ax2.scatter(vqc1[:, 0], vqc1[:, 1], lw=0.5, color='#00CC00')
ax2.scatter(vqc2[:, 0], vqc2[:, 1], lw=0.5, color='#028E9B')
ax2.scatter(vqc3[:, 0], vqc3[:, 1], lw=0.5, color='#FF7800')
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
ax2.set_xlim(x1, x2)
ax2.set_ylim(y1, y2)
ax2.text(-9, 8, 'VQ identified')
fig.savefig('scipy_351_ex1.pdf', bbox_inches='tight')
| mit |
gfyoung/pandas | pandas/tests/frame/test_nonunique_indexes.py | 2 | 17523 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
def check(result, expected=None):
if expected is not None:
tm.assert_frame_equal(result, expected)
result.dtypes
str(result)
class TestDataFrameNonuniqueIndexes:
def test_column_dups_operations(self):
# assignment
# GH 3687
arr = np.random.randn(3, 2)
idx = list(range(2))
df = DataFrame(arr, columns=["A", "A"])
df.columns = idx
expected = DataFrame(arr, columns=idx)
check(df, expected)
idx = date_range("20130101", periods=4, freq="Q-NOV")
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=["a", "a", "a", "a"]
)
df.columns = idx
expected = DataFrame([[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]], columns=idx)
check(df, expected)
# insert
df = DataFrame(
[[1, 1, 1, 5], [1, 1, 2, 5], [2, 1, 3, 5]],
columns=["foo", "bar", "foo", "hello"],
)
df["string"] = "bah"
expected = DataFrame(
[[1, 1, 1, 5, "bah"], [1, 1, 2, 5, "bah"], [2, 1, 3, 5, "bah"]],
columns=["foo", "bar", "foo", "hello", "string"],
)
check(df, expected)
with pytest.raises(ValueError, match="Length of value"):
df.insert(0, "AnotherColumn", range(len(df.index) - 1))
# insert same dtype
df["foo2"] = 3
expected = DataFrame(
[[1, 1, 1, 5, "bah", 3], [1, 1, 2, 5, "bah", 3], [2, 1, 3, 5, "bah", 3]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
# set (non-dup)
df["foo2"] = 4
expected = DataFrame(
[[1, 1, 1, 5, "bah", 4], [1, 1, 2, 5, "bah", 4], [2, 1, 3, 5, "bah", 4]],
columns=["foo", "bar", "foo", "hello", "string", "foo2"],
)
check(df, expected)
df["foo2"] = 3
# delete (non dup)
del df["bar"]
expected = DataFrame(
[[1, 1, 5, "bah", 3], [1, 2, 5, "bah", 3], [2, 3, 5, "bah", 3]],
columns=["foo", "foo", "hello", "string", "foo2"],
)
check(df, expected)
        # try to delete again (it's not consolidated)
del df["hello"]
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# consolidate
df = df._consolidate()
expected = DataFrame(
[[1, 1, "bah", 3], [1, 2, "bah", 3], [2, 3, "bah", 3]],
columns=["foo", "foo", "string", "foo2"],
)
check(df, expected)
# insert
df.insert(2, "new_col", 5.0)
expected = DataFrame(
[[1, 1, 5.0, "bah", 3], [1, 2, 5.0, "bah", 3], [2, 3, 5.0, "bah", 3]],
columns=["foo", "foo", "new_col", "string", "foo2"],
)
check(df, expected)
# insert a dup
with pytest.raises(ValueError, match="cannot insert"):
df.insert(2, "new_col", 4.0)
df.insert(2, "new_col", 4.0, allow_duplicates=True)
expected = DataFrame(
[
[1, 1, 4.0, 5.0, "bah", 3],
[1, 2, 4.0, 5.0, "bah", 3],
[2, 3, 4.0, 5.0, "bah", 3],
],
columns=["foo", "foo", "new_col", "new_col", "string", "foo2"],
)
check(df, expected)
# delete (dup)
del df["foo"]
expected = DataFrame(
[[4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3], [4.0, 5.0, "bah", 3]],
columns=["new_col", "new_col", "string", "foo2"],
)
tm.assert_frame_equal(df, expected)
# dup across dtypes
df = DataFrame(
[[1, 1, 1.0, 5], [1, 1, 2.0, 5], [2, 1, 3.0, 5]],
columns=["foo", "bar", "foo", "hello"],
)
check(df)
df["foo2"] = 7.0
expected = DataFrame(
[[1, 1, 1.0, 5, 7.0], [1, 1, 2.0, 5, 7.0], [2, 1, 3.0, 5, 7.0]],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
result = df["foo"]
expected = DataFrame([[1, 1.0], [1, 2.0], [2, 3.0]], columns=["foo", "foo"])
check(result, expected)
# multiple replacements
df["foo"] = "string"
expected = DataFrame(
[
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
["string", 1, "string", 5, 7.0],
],
columns=["foo", "bar", "foo", "hello", "foo2"],
)
check(df, expected)
del df["foo"]
expected = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "hello", "foo2"]
)
check(df, expected)
# values
df = DataFrame([[1, 2.5], [3, 4.5]], index=[1, 2], columns=["x", "x"])
result = df.values
expected = np.array([[1, 2.5], [3, 4.5]])
assert (result == expected).all().all()
# rename, GH 4403
df4 = DataFrame(
{"RT": [0.0454], "TClose": [22.02], "TExg": [0.0422]},
index=MultiIndex.from_tuples(
[(600809, 20130331)], names=["STK_ID", "RPT_Date"]
),
)
df5 = DataFrame(
{
"RPT_Date": [20120930, 20121231, 20130331],
"STK_ID": [600809] * 3,
"STK_Name": ["饡驦", "饡驦", "饡驦"],
"TClose": [38.05, 41.66, 30.01],
},
index=MultiIndex.from_tuples(
[(600809, 20120930), (600809, 20121231), (600809, 20130331)],
names=["STK_ID", "RPT_Date"],
),
)
k = pd.merge(df4, df5, how="inner", left_index=True, right_index=True)
result = k.rename(columns={"TClose_x": "TClose", "TClose_y": "QT_Close"})
str(result)
result.dtypes
expected = DataFrame(
[[0.0454, 22.02, 0.0422, 20130331, 600809, "饡驦", 30.01]],
columns=[
"RT",
"TClose",
"TExg",
"RPT_Date",
"STK_ID",
"STK_Name",
"QT_Close",
],
).set_index(["STK_ID", "RPT_Date"], drop=False)
tm.assert_frame_equal(result, expected)
# reindex is invalid!
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar"])
with pytest.raises(ValueError, match=msg):
df.reindex(columns=["bar", "foo"])
# drop
df = DataFrame(
[[1, 5, 7.0], [1, 5, 7.0], [1, 5, 7.0]], columns=["bar", "a", "a"]
)
result = df.drop(["a"], axis=1)
expected = DataFrame([[1], [1], [1]], columns=["bar"])
check(result, expected)
result = df.drop("a", axis=1)
check(result, expected)
# describe
df = DataFrame(
[[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=["bar", "a", "a"],
dtype="float64",
)
result = df.describe()
s = df.iloc[:, 0].describe()
expected = pd.concat([s, s, s], keys=df.columns, axis=1)
check(result, expected)
# check column dups with index equal and not equal to df's index
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "A"],
)
for index in [df.index, pd.Index(list("edcba"))]:
this_df = df.copy()
expected_ser = Series(index.values, index=this_df.index)
expected_df = DataFrame(
{"A": expected_ser, "B": this_df["B"], "A": expected_ser},
columns=["A", "B", "A"],
)
this_df["A"] = index
check(this_df, expected_df)
# operations
for op in ["__add__", "__mul__", "__sub__", "__truediv__"]:
df = DataFrame({"A": np.arange(10), "B": np.random.rand(10)})
expected = getattr(df, op)(df)
expected.columns = ["A", "A"]
df.columns = ["A", "A"]
result = getattr(df, op)(df)
check(result, expected)
# multiple assignments that change dtypes
# the location indexer is a slice
# GH 6120
df = DataFrame(np.random.randn(5, 2), columns=["that", "that"])
expected = DataFrame(1.0, index=range(5), columns=["that", "that"])
df["that"] = 1.0
check(df, expected)
df = DataFrame(np.random.rand(5, 2), columns=["that", "that"])
expected = DataFrame(1, index=range(5), columns=["that", "that"])
df["that"] = 1
check(df, expected)
def test_column_dups2(self):
# drop buggy GH 6240
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
expected = df.take([0, 1, 1], axis=1)
df2 = df.take([2, 0, 1, 2, 1], axis=1)
result = df2.drop("C", axis=1)
tm.assert_frame_equal(result, expected)
# dropna
df = DataFrame(
{
"A": np.random.randn(5),
"B": np.random.randn(5),
"C": np.random.randn(5),
"D": ["a", "b", "c", "d", "e"],
}
)
df.iloc[2, [0, 1, 2]] = np.nan
df.iloc[0, 0] = np.nan
df.iloc[1, 1] = np.nan
df.iloc[:, 3] = np.nan
expected = df.dropna(subset=["A", "B", "C"], how="all")
expected.columns = ["A", "A", "B", "C"]
df.columns = ["A", "A", "B", "C"]
result = df.dropna(subset=["A", "C"], how="all")
tm.assert_frame_equal(result, expected)
def test_getitem_boolean_series_with_duplicate_columns(self):
# boolean indexing
# GH 4879
dups = ["A", "A", "C", "D"]
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df.C > 6]
check(result, expected)
def test_getitem_boolean_frame_with_duplicate_columns(self):
dups = ["A", "A", "C", "D"]
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
# `df > 6` is a DataFrame with the same shape+alignment as df
expected = df[df > 6]
expected.columns = dups
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
result = df[df > 6]
check(result, expected)
def test_getitem_boolean_frame_unaligned_with_duplicate_columns(self):
# `df.A > 6` is a DataFrame with a different shape from df
dups = ["A", "A", "C", "D"]
# boolean with the duplicate raises
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
def test_column_dups_indexing(self):
# dup aligning operations should work
# GH 5185
df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])
df2 = DataFrame([1, 2, 3], index=[1, 2, 3])
expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3])
result = df1.sub(df2)
tm.assert_frame_equal(result, expected)
# equality
df1 = DataFrame([[1, 2], [2, np.nan], [3, 4], [4, 4]], columns=["A", "B"])
df2 = DataFrame([[0, 1], [2, 4], [2, np.nan], [4, 5]], columns=["A", "A"])
# not-comparing like-labelled
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 == df2
df1r = df1.reindex_like(df2)
result = df1r == df2
expected = DataFrame(
[[False, True], [True, False], [False, False], [True, False]],
columns=["A", "A"],
)
tm.assert_frame_equal(result, expected)
# mixed column selection
# GH 5639
dfbool = DataFrame(
{
"one": Series([True, True, False], index=["a", "b", "c"]),
"two": Series([False, False, True, False], index=["a", "b", "c", "d"]),
"three": Series([False, True, True, True], index=["a", "b", "c", "d"]),
}
)
expected = pd.concat([dfbool["one"], dfbool["three"], dfbool["one"]], axis=1)
result = dfbool[["one", "three", "one"]]
check(result, expected)
# multi-axis dups
# GH 6121
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]].copy()
expected = z.loc[["a", "c", "a"]]
df = DataFrame(
np.arange(25.0).reshape(5, 5),
index=["a", "b", "c", "d", "e"],
columns=["A", "B", "C", "D", "E"],
)
z = df[["A", "C", "A"]]
result = z.loc[["a", "c", "a"]]
check(result, expected)
def test_columns_with_dups(self):
# GH 3468 related
# basic
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["a", "a.1"]
str(df)
expected = DataFrame([[1, 2]], columns=["a", "a.1"])
tm.assert_frame_equal(df, expected)
df = DataFrame([[1, 2, 3]], columns=["b", "a", "a"])
df.columns = ["b", "a", "a.1"]
str(df)
expected = DataFrame([[1, 2, 3]], columns=["b", "a", "a.1"])
tm.assert_frame_equal(df, expected)
# with a dup index
df = DataFrame([[1, 2]], columns=["a", "a"])
df.columns = ["b", "b"]
str(df)
expected = DataFrame([[1, 2]], columns=["b", "b"])
tm.assert_frame_equal(df, expected)
# multi-dtype
df = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]],
columns=["a", "a", "b", "b", "d", "c", "c"],
)
df.columns = list("ABCDEFG")
str(df)
expected = DataFrame(
[[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a", "a", "a"])
df.columns = ["a", "a.1", "a.2", "a.3"]
str(df)
expected = DataFrame([[1, 2, "foo", "bar"]], columns=["a", "a.1", "a.2", "a.3"])
tm.assert_frame_equal(df, expected)
# dups across blocks
df_float = DataFrame(np.random.randn(10, 3), dtype="float64")
df_int = DataFrame(np.random.randn(10, 3), dtype="int64")
df_bool = DataFrame(True, index=df_float.index, columns=df_float.columns)
df_object = DataFrame("foo", index=df_float.index, columns=df_float.columns)
df_dt = DataFrame(
pd.Timestamp("20010101"), index=df_float.index, columns=df_float.columns
)
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
assert len(df._mgr.blknos) == len(df.columns)
assert len(df._mgr.blklocs) == len(df.columns)
# testing iloc
for i in range(len(df.columns)):
df.iloc[:, i]
# dup columns across dtype GH 2079/2194
vals = [[1, -1, 2.0], [2, -2, 3.0]]
rs = DataFrame(vals, columns=["A", "A", "B"])
xp = DataFrame(vals)
xp.columns = ["A", "A", "B"]
tm.assert_frame_equal(rs, xp)
def test_set_value_by_index(self):
# See gh-12344
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = list("AAA")
expected = df.iloc[:, 2]
df.iloc[:, 0] = 3
tm.assert_series_equal(df.iloc[:, 2], expected)
df = DataFrame(np.arange(9).reshape(3, 3).T)
df.columns = [2, float(2), str(2)]
expected = df.iloc[:, 1]
df.iloc[:, 0] = 3
tm.assert_series_equal(df.iloc[:, 1], expected)
@pytest.mark.parametrize(
"data1,data2,expected_data",
(
(
[[1, 2], [3, 4]],
[[0.5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [np.nan, 7.0], [6.0, 8.0]],
),
(
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [5, 7], [6, 8]],
),
),
)
def test_masking_duplicate_columns_mixed_dtypes(
self,
data1,
data2,
expected_data,
):
# GH31954
df1 = DataFrame(np.array(data1))
df2 = DataFrame(np.array(data2))
df = pd.concat([df1, df2], axis=1)
result = df[df > 2]
expected = DataFrame(
{i: np.array(col) for i, col in enumerate(expected_data)}
).rename(columns={2: 0, 3: 1})
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
tmerrick1/spack | var/spack/repos/builtin/packages/paraview/package.py | 4 | 8421 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Paraview(CMakePackage):
"""ParaView is an open-source, multi-platform data analysis and
visualization application."""
homepage = 'http://www.paraview.org'
url = "http://www.paraview.org/files/v5.3/ParaView-v5.3.0.tar.gz"
_urlfmt = 'http://www.paraview.org/files/v{0}/ParaView-v{1}{2}.tar.gz'
version('5.4.1', '4030c70477ec5a85aa72d6fc86a30753')
version('5.4.0', 'b92847605bac9036414b644f33cb7163')
version('5.3.0', '68fbbbe733aa607ec13d1db1ab5eba71')
version('5.2.0', '4570d1a2a183026adb65b73c7125b8b0')
version('5.1.2', '44fb32fc8988fcdfbc216c9e40c3e925')
version('5.0.1', 'fdf206113369746e2276b95b257d2c9b')
version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378')
variant('plugins', default=True,
description='Install include files for plugins support')
variant('python', default=False, description='Enable Python support')
variant('mpi', default=True, description='Enable MPI support')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('qt', default=False, description='Enable Qt (gui) support')
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('examples', default=False, description="Build examples")
variant('hdf5', default=False, description="Use external HDF5")
depends_on('python@2:2.8', when='+python')
depends_on('py-numpy', when='+python', type='run')
depends_on('py-matplotlib', when='+python', type='run')
depends_on('mpi', when='+mpi')
depends_on('qt+opengl', when='@5.3.0:+qt+opengl2')
depends_on('qt~opengl', when='@5.3.0:+qt~opengl2')
depends_on('qt@:4', when='@:5.2.0+qt')
depends_on('mesa+swrender', when='+osmesa')
depends_on('libxt', when='+qt')
conflicts('+qt', when='+osmesa')
depends_on('bzip2')
depends_on('freetype')
# depends_on('hdf5+mpi', when='+mpi')
# depends_on('hdf5~mpi', when='~mpi')
depends_on('hdf5+hl+mpi', when='+hdf5+mpi')
depends_on('hdf5+hl~mpi', when='+hdf5~mpi')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('libxml2')
# depends_on('netcdf')
# depends_on('netcdf-cxx')
# depends_on('protobuf') # version mismatches?
# depends_on('sqlite') # external version not supported
depends_on('zlib')
depends_on('[email protected]:', type='build')
patch('stl-reader-pv440.patch', when='@4.4.0')
# Broken gcc-detection - improved in 5.1.0, redundant later
patch('gcc-compiler-pv501.patch', when='@:5.0.1')
# Broken installation (ui_pqExportStateWizard.h) - fixed in 5.2.0
patch('ui_pqExportStateWizard.patch', when='@:5.1.2')
def url_for_version(self, version):
"""Handle ParaView version-based custom URLs."""
if version < Version('5.1.0'):
return self._urlfmt.format(version.up_to(2), version, '-source')
else:
return self._urlfmt.format(version.up_to(2), version, '')
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
paraview_version = 'paraview-%s' % self.spec.version.up_to(2)
spack_env.set('PARAVIEW_VTK_DIR',
join_path(lib_dir, 'cmake', paraview_version))
def setup_environment(self, spack_env, run_env):
if os.path.isdir(self.prefix.lib64):
lib_dir = self.prefix.lib64
else:
lib_dir = self.prefix.lib
paraview_version = 'paraview-%s' % self.spec.version.up_to(2)
run_env.prepend_path('LIBRARY_PATH', join_path(lib_dir,
paraview_version))
run_env.prepend_path('LD_LIBRARY_PATH', join_path(lib_dir,
paraview_version))
run_env.set('PARAVIEW_VTK_DIR',
join_path(lib_dir, 'cmake', paraview_version))
if '+python' in self.spec:
run_env.prepend_path('PYTHONPATH', join_path(lib_dir,
paraview_version))
run_env.prepend_path('PYTHONPATH', join_path(lib_dir,
paraview_version, 'site-packages'))
run_env.prepend_path('PYTHONPATH', join_path(lib_dir,
paraview_version, 'site-packages', 'vtk'))
def cmake_args(self):
"""Populate cmake arguments for ParaView."""
spec = self.spec
def variant_bool(feature, on='ON', off='OFF'):
"""Ternary for spec variant to ON/OFF string"""
if feature in spec:
return on
return off
def nvariant_bool(feature):
"""Negated ternary for spec variant to OFF/ON string"""
return variant_bool(feature, on='OFF', off='ON')
rendering = variant_bool('+opengl2', 'OpenGL2', 'OpenGL')
includes = variant_bool('+plugins')
cmake_args = [
'-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % variant_bool('+qt'),
'-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % variant_bool('+osmesa'),
'-DVTK_USE_X:BOOL=%s' % nvariant_bool('+osmesa'),
'-DVTK_RENDERING_BACKEND:STRING=%s' % rendering,
'-DPARAVIEW_INSTALL_DEVELOPMENT_FILES:BOOL=%s' % includes,
'-DBUILD_TESTING:BOOL=OFF',
'-DBUILD_EXAMPLES:BOOL=%s' % variant_bool('+examples'),
'-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
'-DVTK_USE_SYSTEM_HDF5:BOOL=%s' % variant_bool('+hdf5'),
'-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
'-DVTK_USE_SYSTEM_NETCDF:BOOL=OFF',
'-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
'-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
]
# The assumed qt version changed to QT5 (as of paraview 5.2.1),
# so explicitly specify which QT major version is actually being used
if '+qt' in spec:
cmake_args.extend([
'-DPARAVIEW_QT_VERSION=%s' % spec['qt'].version[0],
])
if '+python' in spec:
cmake_args.extend([
'-DPARAVIEW_ENABLE_PYTHON:BOOL=ON',
'-DPYTHON_EXECUTABLE:FILEPATH=%s' % spec['python'].command.path
])
if '+mpi' in spec:
cmake_args.extend([
'-DPARAVIEW_USE_MPI:BOOL=ON',
'-DMPIEXEC:FILEPATH=%s/bin/mpiexec' % spec['mpi'].prefix,
'-DMPI_CXX_COMPILER:PATH=%s' % spec['mpi'].mpicxx,
'-DMPI_C_COMPILER:PATH=%s' % spec['mpi'].mpicc,
'-DMPI_Fortran_COMPILER:PATH=%s' % spec['mpi'].mpifc
])
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON',
])
# Hide git from Paraview so it will not use `git describe`
# to find its own version number
if spec.satisfies('@5.4.0:5.4.1'):
cmake_args.extend([
'-DGIT_EXECUTABLE=FALSE'
])
return cmake_args
| lgpl-2.1 |
fatiando/fatiando | gallery/gridder/interpolate.py | 6 | 2505 | """
Interpolate irregular data
--------------------------
The functions :func:`fatiando.gridder.interp` and
:func:`fatiando.gridder.interp_at` offer convenient wrappers around
``scipy.interpolate.griddata``. The scipy function is more general and can
interpolate n-dimensional data. Our functions offer the convenience of
generating the regular grid points and optionally using nearest-neighbor
interpolation to extrapolate outside the convex hull of the data points.
"""
from fatiando import gridder
import matplotlib.pyplot as plt
import numpy as np
# Generate synthetic data measured at random points
area = (0, 1, 0, 1)
x, y = gridder.scatter(area, n=500, seed=0)
data = x*(1 - x)*np.cos(4*np.pi*x)*np.sin(4*np.pi*y**2)**2
# Say we want to interpolate the data onto a regular grid with a given shape
shape = (100, 200)
# The gridder.interp function takes care of selecting the containing area of
# the data and generating the regular grid for us.
# Let's interpolate using the different options offered by griddata and plot
# them all.
plt.figure(figsize=(10, 8))
xp, yp, nearest = gridder.interp(x, y, data, shape, algorithm='nearest')
plt.subplot(2, 2, 1)
plt.title('Nearest-neighbors')
plt.contourf(yp.reshape(shape), xp.reshape(shape), nearest.reshape(shape),
30, cmap='RdBu_r')
xp, yp, linear = gridder.interp(x, y, data, shape, algorithm='linear')
plt.subplot(2, 2, 2)
plt.title('Linear')
plt.contourf(yp.reshape(shape), xp.reshape(shape), linear.reshape(shape),
30, cmap='RdBu_r')
xp, yp, cubic = gridder.interp(x, y, data, shape, algorithm='cubic')
plt.subplot(2, 2, 3)
plt.title('Cubic')
plt.contourf(yp.reshape(shape), xp.reshape(shape), cubic.reshape(shape),
30, cmap='RdBu_r')
# Notice that the cubic and linear interpolation leave empty the points that
# are outside the convex hull (bounding region) of the original scatter data.
# These data points will have NaN values or be masked in the data array, which
# can cause some problems for processing and inversion (any FFT operation in
# fatiando.gravmag will fail, for example). Use "extrapolate=True" to use
# nearest-neighbors to fill in those missing points.
xp, yp, cubic_ext = gridder.interp(x, y, data, shape, algorithm='cubic',
extrapolate=True)
plt.subplot(2, 2, 4)
plt.title('Cubic with extrapolation')
plt.contourf(yp.reshape(shape), xp.reshape(shape), cubic_ext.reshape(shape),
30, cmap='RdBu_r')
plt.tight_layout()
plt.show()
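# The module docstring also mentions gridder.interp_at, which interpolates at
# arbitrary points instead of onto a generated regular grid. A minimal sketch
# (the argument order and keyword names are assumed to mirror gridder.interp):
#
#     # xp, yp: the coordinates where interpolated values are wanted,
#     # e.g. station locations; here just a second random scatter.
#     xp, yp = gridder.scatter(area, n=100, seed=1)
#     values = gridder.interp_at(x, y, data, xp, yp, algorithm='cubic',
#                                extrapolate=True)
#
# This is convenient when values are needed at measurement locations rather
# than on a full regular grid.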
| bsd-3-clause |
jmikko/fairnessML | zafar_methods/disparate_mistreatment/synthetic_data_demo/fairness_acc_tradeoff.py | 1 | 11588 | import os, sys
import numpy as np
from generate_synthetic_data import *
sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
import utils as ut
import funcs_disp_mist as fdm
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import plot_syn_boundaries as psb
from copy import deepcopy
import matplotlib.pyplot as plt # for plotting stuff
def test_synthetic_data():
""" Generate the synthetic data """
data_type = 1
X, y, x_control = generate_synthetic_data(data_type=data_type,
plot_data=False) # set plot_data to False to skip the data plot
sensitive_attrs = x_control.keys()
""" Split the data into train and test """
train_fold_size = 0.5
x_train, y_train, x_control_train, x_test, y_test, x_control_test = ut.split_into_train_test(X, y, x_control,
train_fold_size)
cons_params = None # constraint parameters, will use them later
loss_function = "logreg" # perform the experiments with logistic regression
EPS = 1e-4
def train_test_classifier():
w = fdm.train_model_disp_mist(x_train, y_train, x_control_train, loss_function, EPS, cons_params)
train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test = fdm.get_clf_stats(
w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs)
# accuracy and FPR are for the test because we need of for plotting
# the covariance is for train, because we need it for setting the thresholds
return w, test_score, s_attr_to_fp_fn_test, cov_all_train
""" Classify the data while optimizing for accuracy """
print("== Unconstrained (original) classifier ==")
w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons, cov_all_train_uncons = train_test_classifier()
print("\n-----------------------------------------------------------------------------------\n")
""" Now classify such that we optimize for accuracy while achieving perfect fairness """
print("== Classifier with fairness constraint ==")
it = 0.05
mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
acc_arr = []
fpr_per_group = {0: [], 1: []}
fnr_per_group = {0: [], 1: []}
cons_type = 1 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
tau = 5.0
mu = 1.2
for m in mult_range:
sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
for s_attr in sensitive_attrs_to_cov_thresh.keys():
for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
cons_params = {"cons_type": cons_type,
"tau": tau,
"mu": mu,
"sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
w_cons, acc_cons, s_attr_to_fp_fn_test_cons, cov_all_train_cons = train_test_classifier()
fpr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fpr"])
fpr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fpr"])
fnr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fnr"])
fnr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fnr"])
acc_arr.append(acc_cons)
fs = 15
ax = plt.subplot(2, 1, 1)
plt.plot(mult_range, fpr_per_group[0], "-o", color="green", label="Group-0")
plt.plot(mult_range, fpr_per_group[1], "-o", color="blue", label="Group-1")
ax.set_xlim([max(mult_range), min(mult_range)])
plt.ylabel('False positive rate', fontsize=fs)
ax.legend(fontsize=fs)
ax = plt.subplot(2, 1, 2)
plt.plot(mult_range, acc_arr, "-o", color="green", label="")
ax.set_xlim([max(mult_range), min(mult_range)])
plt.xlabel('Covariance multiplicative factor (m)', fontsize=fs)
plt.ylabel('Accuracy', fontsize=fs)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.savefig("img/fairness_acc_tradeoff_cons_type_%d.png" % cons_type)
plt.show()
return
if __name__ == '__main__':
# test_synthetic_data()
from load_data import load_binary_diabetes_uci, load_heart_uci, load_breast_cancer, load_adult, load_adult_race
from sklearn import svm
from sklearn.metrics import accuracy_score
import numpy as np
from measures import equalized_odds_measure_TP, equalized_odds_measure_FP, \
equalized_odds_measure_TP_from_list_of_sensfeat
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from scipy.optimize import linprog
from hardt import gamma_y_hat, HardtMethod
from scipy.spatial import ConvexHull
from collections import namedtuple
experiment_number = 0
if experiment_number == 0:
dataset_train = load_binary_diabetes_uci()
dataset_test = load_binary_diabetes_uci()
sensible_feature = 1 # sex
elif experiment_number == 1:
dataset_train = load_heart_uci()
dataset_test = load_heart_uci()
sensible_feature = 1 # sex
elif experiment_number == 2:
dataset_train, dataset_test = load_adult(smaller=False)
sensible_feature = 9 # sex
print('Different values of the sensible feature', sensible_feature, ':',
set(dataset_train.data[:, sensible_feature]))
elif experiment_number == 3:
dataset_train, dataset_test = load_adult_race(smaller=False)
sensible_feature = 8 # race
print('Different values of the sensible feature', sensible_feature, ':',
set(dataset_train.data[:, sensible_feature]))
if experiment_number in [0, 1]:
# % for train
ntrain = 5 * len(dataset_train.target) // 10
dataset_train.data = dataset_train.data[:ntrain, :]
dataset_train.target = dataset_train.target[:ntrain]
dataset_test.data = dataset_test.data[ntrain:, :]
dataset_test.target = dataset_test.target[ntrain:]
if experiment_number in [2, 3]:
ntrain = len(dataset_test.target)
# Standard SVM
# Train an SVM using the training set
print('Grid search...')
grid_search_complete = 0
if grid_search_complete:
param_grid = [
{'C': [0.1, 0.5, 1, 10, 100, 1000], 'kernel': ['linear']},
# {'C': [0.1, 0.5, 1, 10, 100, 1000], 'gamma': ['auto', 0.001, 0.0001], 'kernel': ['rbf']},
]
else:
param_grid = [{'C': [10.0], 'kernel': ['linear'], 'gamma': ['auto']}]
svc = svm.SVC()
clf = GridSearchCV(svc, param_grid, n_jobs=1)
clf.fit(dataset_train.data, dataset_train.target)
print('Y:', clf.best_estimator_)
# Accuracy
pred = clf.predict(dataset_test.data)
pred_train = clf.predict(dataset_train.data)
print('Accuracy test:', accuracy_score(dataset_test.target, pred))
print('Accuracy train:', accuracy_score(dataset_train.target, pred_train))
# Fairness measure
print('Eq. opp. test: \n',
equalized_odds_measure_TP(dataset_test, clf, [sensible_feature], ylabel=1))
print('Eq. opp. train: \n',
equalized_odds_measure_TP(dataset_train, clf, [sensible_feature], ylabel=1))
# Zafar method
""" Generate the synthetic data """
X, y, x_control = dataset_train.data, dataset_train.target, {"s1": dataset_train.data[:, sensible_feature]}
sensitive_attrs = x_control.keys()
""" Split the data into train and test """
train_fold_size = 0.5
    # Build the feature matrices without the sensitive column (np.hstack joins the
    # remaining columns) and keep that column separately as the protected attribute.
    x_train, y_train, x_control_train, x_test, y_test, x_control_test = \
        np.hstack((dataset_train.data[:, :sensible_feature], dataset_train.data[:, sensible_feature + 1:])), \
        dataset_train.target, {"s1": dataset_train.data[:, sensible_feature]}, \
        np.hstack((dataset_test.data[:, :sensible_feature], dataset_test.data[:, sensible_feature + 1:])), \
        dataset_test.target, {"s1": dataset_test.data[:, sensible_feature]}
cons_params = None # constraint parameters, will use them later
loss_function = "logreg" # perform the experiments with logistic regression
EPS = 1e-4
def train_test_classifier():
w = fdm.train_model_disp_mist(x_train, y_train, x_control_train, loss_function, EPS, cons_params)
train_score, test_score, cov_all_train, cov_all_test, s_attr_to_fp_fn_train, s_attr_to_fp_fn_test = fdm.get_clf_stats(
w, x_train, y_train, x_control_train, x_test, y_test, x_control_test, sensitive_attrs)
# accuracy and FPR are for the test because we need of for plotting
# the covariance is for train, because we need it for setting the thresholds
return w, test_score, s_attr_to_fp_fn_test, cov_all_train
""" Classify the data while optimizing for accuracy """
print("== Unconstrained (original) classifier ==")
w_uncons, acc_uncons, s_attr_to_fp_fn_test_uncons, cov_all_train_uncons = train_test_classifier()
print("\n-----------------------------------------------------------------------------------\n")
""" Now classify such that we optimize for accuracy while achieving perfect fairness """
print("== Classifier with fairness constraint ==")
it = 0.05
mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
acc_arr = []
fpr_per_group = {0: [], 1: []}
fnr_per_group = {0: [], 1: []}
cons_type = 1 # FPR constraint -- just change the cons_type, the rest of parameters should stay the same
tau = 5.0
mu = 1.2
for m in mult_range:
sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
for s_attr in sensitive_attrs_to_cov_thresh.keys():
for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
cons_params = {"cons_type": cons_type,
"tau": tau,
"mu": mu,
"sensitive_attrs_to_cov_thresh": sensitive_attrs_to_cov_thresh}
w_cons, acc_cons, s_attr_to_fp_fn_test_cons, cov_all_train_cons = train_test_classifier()
fpr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fpr"])
fpr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fpr"])
fnr_per_group[0].append(s_attr_to_fp_fn_test_cons["s1"][0.0]["fnr"])
fnr_per_group[1].append(s_attr_to_fp_fn_test_cons["s1"][1.0]["fnr"])
acc_arr.append(acc_cons)
fs = 15
ax = plt.subplot(2, 1, 1)
plt.plot(mult_range, fpr_per_group[0], "-o", color="green", label="Group-0")
plt.plot(mult_range, fpr_per_group[1], "-o", color="blue", label="Group-1")
ax.set_xlim([max(mult_range), min(mult_range)])
plt.ylabel('False positive rate', fontsize=fs)
ax.legend(fontsize=fs)
ax = plt.subplot(2, 1, 2)
plt.plot(mult_range, acc_arr, "-o", color="green", label="")
ax.set_xlim([max(mult_range), min(mult_range)])
plt.xlabel('Covariance multiplicative factor (m)', fontsize=fs)
plt.ylabel('Accuracy', fontsize=fs)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
plt.savefig("img/fairness_acc_tradeoff_cons_type_%d.png" % cons_type)
plt.show()
| gpl-3.0 |
zorroblue/scikit-learn | sklearn/datasets/tests/test_base.py | 8 | 9532 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
def test_default_load_files():
try:
setup_load_files()
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
finally:
teardown_load_files()
def test_load_files_w_categories_desc_and_encoding():
try:
setup_load_files()
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
finally:
teardown_load_files()
def test_load_files_wo_load_content():
try:
setup_load_files()
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
finally:
teardown_load_files()
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread # noqa
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_wine(return_X_y=True)
bunch = load_wine()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
466152112/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
wiedenhoeft/HalPal | halpal.py | 1 | 8690 | from __future__ import division
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as patches
import scipy.stats
def HaltonSequence(base, index=0):
"""Halton sequence generator for a given base (must be prime)."""
while True:
i = index
result = 0
f = 1
while i>0:
f = f/base
result += f *(i % base);
i = np.floor(i / base)
yield result
index += 1
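# A quick sanity check of the generator above (illustration only, not part of
# the original script): with base 2 and index 0 it reproduces the classic
# van der Corput sequence 0, 1/2, 1/4, 3/4, 1/8, ...
#
#     h = HaltonSequence(2)
#     print [round(h.next(), 3) for _ in range(5)]  # [0.0, 0.5, 0.25, 0.75, 0.125]
#
# Using a different prime base per L*a*b* axis (11, 2 and 3 by default below)
# gives low-discrepancy coverage of each axis without correlating the axes.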
# Get ranges for LAB values from a set of RGB hexes
def getRanges(hexlist):
labcolors = [convert_color(sRGBColor.new_from_rgb_hex(h), LabColor) for h in hexlist]
labtuples = np.array([t.get_value_tuple() for t in labcolors])
maxima = labtuples.max(axis=0)
minima = labtuples.min(axis=0)
L = len(labcolors)
minDist=np.inf
for i in xrange(L-1):
for j in xrange(i+1, L):
minDist = min(minDist, delta_e_cie2000(labcolors[i], labcolors[j]))
return [minima, maxima, minDist]
def PaletteGenerator(
maxNrColors = None,
L_min = 40,
L_max = 95,
a_min = -60,
a_max = 80,
b_min=-60,
b_max=80,
L_base=11,
a_base=2,
b_base=3,
L_index = 0,
a_index = 0,
b_index = 0,
minDistance=0, # color distance must be greater than this
adaptToKeepInput=False, # adapt the parameters so as to keep all input colors in RGBhexes
maxFails=1000,
        RGBhexes=[],  # a set of colors in RGB hex format. These are the first emitted from the generator, excluding those that violate the minDistance constraint with respect to the previously generated ones
gradient=False
):
if adaptToKeepInput:
minLab, maxLab, mind = getRanges(RGBhexes)
L_min = min(minLab[0], L_min)
a_min = min(minLab[1], a_min)
        b_min = min(minLab[2], b_min)  # index 2 is the b* channel
L_max = max(maxLab[0], L_max)
a_max = max(maxLab[1], a_max)
        b_max = max(maxLab[2], b_max)  # index 2 is the b* channel
minDistance = min(minDistance, mind)
print "L\t%f\t%f" % (L_min, L_max)
print "a\t%f\t%f" % (a_min, a_max)
print "b\t%f\t%f" % (b_min, b_max)
print "d\t%f" % minDistance
"""Generate a color palette using Halton sequences in CIE L*a*b* space."""
assert minDistance>0, "Minimum distance must be greater than 0!"
alllab = [] # all previously yielded colors in Lab format
nrFails = 0
# traverse L*a*b* color space using a low-discrepancy sequence (Halton sequence) for each dimension, and reject a color if it is outside the RGB gamut
HL = HaltonSequence(L_base, L_index) # the first argument controls the number of lighness steps
Ha = HaltonSequence(a_base, a_index)
Hb = HaltonSequence(b_base, b_index)
    i = 0
    if gradient and not RGBhexes:
        # make sure z exists on the first pass; in gradient mode it is
        # otherwise only advanced after a valid color has been found
        z = HL.next()
while True:
# Yield RGBhexes first, then the ones generated by Halton sequences
processingInput = (i < len(RGBhexes))
if processingInput:
rgbhex = RGBhexes[i]
else:
x = Ha.next()
y = Hb.next()
if not gradient:
z = HL.next()
L= z*(L_max-L_min)+L_min
a = x*(a_max-a_min)+a_min
b = y*(b_max-b_min)+b_min
labcolor = LabColor(L, a, b)
rgbcolor = convert_color(labcolor, sRGBColor)
rgbhex = rgbcolor.get_rgb_hex()
valid=True
# check if RGB is within gamut
if not processingInput: # manually input colors are always within RGB gamut
for v in rgbcolor.get_upscaled_value_tuple():
if v <=0 or v>255: # colormath keeps values out of gamut; it does not use negative values though, so any 00 is potentialy out of gamut as well
valid = False
break
# check if too close to a color in the palette
nearestDist = np.inf
if valid:
# Round the color to RGB integer precision, otherwise the minimum distance might be violated when converting RGB hexes to Lab
# NOTE This is not done earlier since conversions to hex fail if RGB is out of gamut
rgbcolor = sRGBColor.new_from_rgb_hex(rgbhex)
labcolor = convert_color(rgbcolor, LabColor)
# check if the minimum distance is violated
for al in alllab: # TODO fast spatial query structure (kd-tree etc., or binning)
colorDist = delta_e_cie2000(al, labcolor)
nearestDist = min(nearestDist, colorDist)
if colorDist < minDistance:
valid = False
break
if valid:
alllab.append(labcolor)
if gradient:
z = HL.next() # change the lightness only if we found a valid color, which leads to higher contrast in adjacent colors
print "%s\tFound color %d of minimum distance %f after %d iteration%s" % (rgbhex, i, nearestDist, nrFails+1, min(nrFails, 1)*"s")
nrFails = 0
i += 1
yield rgbhex
else:
if processingInput:
print " \tDropping input color %s at distance %f" % (rgbhex, nearestDist)
nrFails += 1
if nrFails >= maxFails:
print " \t[ERROR] Could not find a new color after %d iterations!" % maxFails
yield None
break
if len(alllab) == maxNrColors:
yield None
break
class Palette:
"""This class implements an \"infinite\" color palette, i.e. when querying p[i] new colors will be created until this query can be satisfied, the palette then contains len(p) = i+1 colors. Notice that this means the length potentially changes with each query! This class is useful if you have plotting routine that cannot predict how many colors you are going to need."""
def __init__(
self,
nrColors = None,
L_min = 40,
L_max = 95,
a_min = -60,
a_max = 80,
b_min=-60,
b_max=80,
L_base=11,
a_base=2,
b_base=3,
L_index = 0,
a_index = 0,
b_index = 0,
minDistance = 0,
adaptToKeepInput=False,
maxFails=1000,
        RGBhexes = [],  # a set of colors in RGB hex format; these are the first emitted from the generator (see PaletteGenerator above)
gradient=False):
self.generator = PaletteGenerator(None, L_min, L_max, a_min, a_max, b_min, b_max, L_base, a_base, b_base, L_index, a_index, b_index, minDistance, adaptToKeepInput, maxFails, RGBhexes, gradient)
self.allhex = []
self.extendTo(nrColors)
def extendTo(self, nrColors):
"""Extends the palette to <nrColors> colors."""
while len(self.allhex) < nrColors:
color = self.generator.next()
if color is not None:
self.allhex.append(color)
else:
break
def extendBy(self, nrColors):
"""Extends the palette by <nrColors> colors."""
for i in xrange(nrColors):
self.allhex.append(self.generator.next())
def __getitem__(self, i, j=True):
"""self[i] returns the i-th color if it exists, and an IndexError if we are out of bounds. self[[i]] always returns a color for i>=0, as it extends the palette to the necessary number of colors (i+1)."""
if type(i)==list:
self.extendTo(i[0]+1)
return self.allhex[i[0]]
return self.allhex[i]
def __len__(self):
return len(self.allhex)
def plotPalette(self, filename, firstcolor=0, lastcolor=None):
allhex=self.allhex[firstcolor:lastcolor]
if len(allhex)>0:
# plot the resulting palette for reference
rowstretch=2
nrRows = int(np.ceil(np.sqrt(len(allhex))))
nrCols = int(np.ceil(np.sqrt(len(allhex))))
fig = plt.figure(figsize=(nrCols, rowstretch*nrRows))
ax = fig.add_subplot(111, aspect='equal')
ax.set_xticks([])
ax.set_yticks([])
i=0
for r in reversed(xrange(nrRows)):
for c in xrange(nrCols):
ax.add_patch(patches.Rectangle((c, rowstretch*r+0.5), 1, 1, facecolor=allhex[i]))
ax.annotate(str(str(i+firstcolor)), xy=(c+0.5, rowstretch*r), xycoords='data', va="bottom", ha="center", fontsize=24)
ax.annotate(allhex[firstcolor+i], xy=(c+0.5, rowstretch*r+1.5), xycoords='data', va="bottom", ha="center", fontsize=12, family="monospace")
i += 1
if i==len(allhex):
break
if i==len(allhex):
break
plt.xlim([0, nrCols])
plt.ylim([r, rowstretch*nrRows])
plt.tight_layout()
fig.savefig(filename, dpi=90, bbox_inches="tight")
plt.close()
else:
print "Palette empty, nothing to plot!"
def savePalette(self, filename, firstcolor=0, lastcolor=None):
# save the RGB values to a text file
f = file(filename, "w")
f.write("\n".join(self.allhex[firstcolor:lastcolor]))
f.close()
def rgblist(self, firstcolor=0, lastcolor=None):
"""Returns a simple list of colors as RGB hex strings."""
return self.allhex[firstcolor:lastcolor]
if __name__=="__main__":
paired12 = [
#ColorBrewer's Paired12, see http://colorbrewer2.org/
"#a6cee3",
"#1f78b4",
"#b2df8a",
"#33a02c",
"#fb9a99",
"#e31a1c",
"#fdbf6f",
"#ff7f00",
"#cab2d6",
"#6a3d9a",
"#ffff99",
"#b15928"
]
pal = Palette(
56,
L_base=7,
minDistance=20,
adaptToKeepInput=True,
maxFails=500000,
RGBhexes = paired12,
L_min=90,
L_max=95,
a_min=-128,
a_max=128,
b_min=-128,
b_max = 128
)
pal.plotPalette("palette.pdf")
pal.savePalette("palette.txt")
| gpl-3.0 |
MechCoder/scikit-learn | sklearn/datasets/mldata.py | 17 | 7875 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home
from ..utils import Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname :
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name : optional, default: 'label'
Name or index of the column containing the target values.
data_name : optional, default: 'data'
Name or index of the column containing the data.
transpose_data : optional, default: True
If True, transpose the downloaded data array.
data_home : optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by test runners to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
frombeijingwithlove/dlcv_for_beginners | chap9/visualize_conv1_kernels.py | 1 | 1087 | import sys
import numpy as np
import matplotlib.pyplot as plt
import cv2
sys.path.append('/path/to/caffe/python')
import caffe
ZOOM_IN_SIZE = 50
PAD_SIZE = 4
WEIGHTS_FILE = 'freq_regression_iter_10000.caffemodel'
DEPLOY_FILE = 'deploy.prototxt'
net = caffe.Net(DEPLOY_FILE, WEIGHTS_FILE, caffe.TEST)
kernels = net.params['conv1'][0].data
kernels -= kernels.min()
kernels /= kernels.max()
zoomed_in_kernels = []
for kernel in kernels:
zoomed_in_kernels.append(cv2.resize(kernel[0], (ZOOM_IN_SIZE, ZOOM_IN_SIZE), interpolation=cv2.INTER_NEAREST))
# plot the kernels on a 12*8 grid of squares
half_pad = PAD_SIZE // 2  # integer division so the slicing/padding also works under Python 3
padded_size = ZOOM_IN_SIZE+PAD_SIZE
padding = ((0, 0), (half_pad, half_pad), (half_pad, half_pad))
padded_kernels = np.pad(zoomed_in_kernels, padding, 'constant', constant_values=1)
padded_kernels = padded_kernels.reshape(8, 12, padded_size, padded_size).transpose(0, 2, 1, 3)
kernels_img = padded_kernels.reshape((8*padded_size, 12*padded_size))[half_pad:-half_pad, half_pad: -half_pad]
plt.imshow(kernels_img, cmap='gray', interpolation='nearest')
plt.axis('off')
plt.show()
| bsd-3-clause |
kentfrazier/Exhibitionist | Examples/pandas/app.py | 1 | 2187 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import pandas as pd
from pandas.util.testing import makeCustomDataframe as mkdf
from exhibitionist.toolbox import UrlDisplay, get_server
import handlers
# make sure the handlers module is on your python path
# you could just use os.chdir to set the working directory
# to the example root directory.
pjoin = os.path.join
dirname = lambda x: os.path.abspath(os.path.dirname(x))
STATIC_DIR = pjoin(dirname(__file__), "static")
TEMPLATE_DIR = pjoin(dirname(__file__), "templates")
server = get_server(template_path=TEMPLATE_DIR,static_path=STATIC_DIR). \
add_handler(handlers).start()
def my_repr(df):
return UrlDisplay(server.get_view_url("dfView", df), "350px")._repr_html_()
# monkey patch pandas to override its default HTML repr. This could also
# be done upstream as part of pandas itself.
# a cleaner way would be to use IPNB type-based display hooking
# google "ipython formatters for_type"
# or see
# http://ipython.org/ipython-doc/stable/api/generated/IPython.core.formatters.html
# print fancy link/embedded HTML in qtconsole/ipnb
pd.DataFrame._repr_html_ = my_repr
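# Illustrative sketch of the cleaner alternative mentioned above (not part of
# the original example; assumes it runs inside an active IPython session):
#
#     ip = get_ipython()
#     html_formatter = ip.display_formatter.formatters['text/html']
#     html_formatter.for_type(pd.DataFrame, my_repr)
#
# This registers my_repr for DataFrames without monkey-patching pandas itself.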
# now, displaying dataframes in IPython-notebook will open up
# an IFRAME with the grid view
df=mkdf(5000,10)
# now we can display the dataframe from our python prompt
# and view the url or rendered HTML
# >>> df
# try to modify the dataframe in place, and refresh the grid
# with the bottom left-hand button
# df.ix[0,0]="pooh"
# when you're done, shutdown the server to release the socket
# server.stop()
if __name__ == "__main__":
print("""
You should be running this in an interactive session. If
you are, to actually see a view you need to display an object,
so it's repr() will be displayed. So try:
>>> df
to get the url/HTML view, depending on whether you're in the
terminal, or in IPython-Notebook.
Please look at the source for 'app.py' for more things to try out
in the interactive prompt.
In particular, try modifying the df object:
>>> df.ix[0,0] = "Pooh"
and refreshing the grid (refresh button at bottom left corner).
""");
| bsd-3-clause |
adamrvfisher/TechnicalAnalysisLibrary | PriceRelativeMAStrategy.py | 1 | 5151 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 4 01:02:22 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Price Relative Moving Average Strategy
#Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Price relative series (note: Asset3 is not defined anywhere in the original
#source; it is assumed here to be the Asset1/Asset2 price relative, which is
#consistent with the strategy name and the commented-out 'PriceRelative'
#lines further below)
Asset3 = pd.DataFrame()
Asset3['Adj Close'] = Asset1['Adj Close'] / Asset2['Adj Close']
#Brute Force Optimization
iterations = range(0, 3000)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = rand.random()
d = rand.random()
if c + d > 1:
continue
e = rand.randint(3,20)
    window = int(e)
    #recompute the price-relative moving average for this candidate window length
    Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.1):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .003:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
    Dataset[i] = Emptyseries.values
Empty[:] = []
z1 = Dataset.iloc[6]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[6]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[6]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
window = int((Dataset[kfloat][4]))
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset1['Position'] = (Dataset[kfloat][0])
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
Dataset[kfloat][2],Dataset[kfloat][0])
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = (Dataset[kfloat][1])
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
Dataset[kfloat][3],Dataset[kfloat][1])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass'] #* (-1)
#Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
#conversionfactor = Portfolio['PriceRelative'][-1]
print(max(drawdown2))
#pd.to_pickle(Portfolio, 'VXX:UVXY') | apache-2.0 |
DonRegan/nbodykit | contrib/QPMMock.py | 1 | 4307 | from nbodykit.plugins import DataSource
from nbodykit.utils.pluginargparse import BoxSizeParser
import numpy
import logging
logger = logging.getLogger('QPMMock')
class QPMMockDataSource(DataSource):
"""
Class to read data from the DR12 BOSS QPM periodic box
mocks, which are stored as a plain text ASCII file, and
paint the field onto a density grid. The data is read
from file using `pandas.read_csv` and is stored internally in
a `pandas.DataFrame`
Notes
-----
* `pandas` must be installed to use
* columns are `x`, `y`, `z`, `vx`, `vy`, `vz`
Parameters
----------
path : str
the path of the file to read the data from
scaled : bool, optional
rescale the parallel and perp coordinates by the AP factor
rsd : [x|y|z], optional
direction to do the redshift space distortion
velf : float, optional
multiply the velocity data by this factor
"""
field_type = 'QPMMock'
qpar = 0.9851209643
qperp = 0.9925056798
def __init__(self, d):
super(QPMMockDataSource, self).__init__(d)
self._BoxSize0 = self.BoxSize.copy()
# rescale the box size, if scaled = True
if self.scaled:
if self.rsd is None:
self.BoxSize *= self.qperp
else:
dir = 'xyz'.index(self.rsd)
for i in [0,1,2]:
if i == dir:
self.BoxSize[i] *= self.qpar
else:
self.BoxSize[i] *= self.qperp
@classmethod
def register(kls):
h = kls.add_parser()
h.add_argument("path", help="path to file")
h.add_argument("BoxSize", type=BoxSizeParser,
help="the size of the isotropic box, or the sizes of the 3 box dimensions")
h.add_argument("-scaled", action='store_true',
help='rescale the parallel and perp coordinates by the AP factor')
h.add_argument("-rsd", choices="xyz",
help="direction to do redshift distortion")
h.add_argument("-velf", default=1., type=float,
help="factor to scale the velocities")
def read(self, columns, comm, bunchsize):
if comm.rank == 0:
try:
import pandas as pd
except:
raise ImportError("pandas must be installed to use QPMMockDataSource")
# read in the plain text file using pandas
kwargs = {}
kwargs['comment'] = '#'
kwargs['names'] = ['x', 'y', 'z', 'vx', 'vy', 'vz']
kwargs['header'] = None
kwargs['engine'] = 'c'
kwargs['delim_whitespace'] = True
kwargs['usecols'] = ['x', 'y', 'z', 'vx', 'vy', 'vz']
data = pd.read_csv(self.path, **kwargs)
nobj = len(data)
logger.info("total number of objects read is %d" %nobj)
# get position
pos = data[['x', 'y', 'z']].values.astype('f4')
vel = data[['vx', 'vy', 'vz']].values.astype('f4')
vel *= self.velf
else:
pos = numpy.empty(0, dtype=('f4', 3))
vel = numpy.empty(0, dtype=('f4', 3))
# go to redshift-space and wrap periodically
if self.rsd is not None:
dir = 'xyz'.index(self.rsd)
pos[:, dir] += vel[:, dir]
pos[:, dir] %= self._BoxSize0[dir] # enforce periodic boundary conditions
# rescale by AP factor
if self.scaled:
if comm.rank == 0:
logger.info("multiplying by qperp = %.5f" %self.qperp)
# rescale positions and volume
if self.rsd is None:
pos *= self.qperp
else:
if comm.rank == 0:
logger.info("multiplying by qpar = %.5f" %self.qpar)
for i in [0,1,2]:
if i == dir:
pos[:,i] *= self.qpar
else:
pos[:,i] *= self.qperp
P = {}
P['Position'] = pos
P['Velocity'] = vel
P['Mass'] = None
yield [P[key] for key in columns]
| gpl-3.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/io/formats/common.py | 16 | 1094 | # -*- coding: utf-8 -*-
"""
Common helper methods used in different submodules of pandas.io.formats
"""
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values on for level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
----------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True for x in levels[0]]
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
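# Illustrative usage sketch (not part of the original module). With the default
# sentinel '', a repeated index position is marked by an empty string:
#
#     >>> get_level_lengths([['a', '', 'b'], ['x', 'y', 'z']])
#     [{0: 2, 2: 1}, {0: 1, 1: 1, 2: 1}]
#
# i.e. on the first level 'a' spans two rows and 'b' one, while every entry of
# the second level starts a new index.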
| mit |
DGrady/pandas | pandas/tests/test_compat.py | 12 | 2367 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
next)
class TestBuiltinIterators(object):
@classmethod
def check_result(cls, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected,
lengths):
assert not isinstance(iter_res, list)
assert isinstance(list_res, list)
iter_res = list(iter_res)
assert len(list_res) == length
assert len(iter_res) == length
assert iter_res == exp
assert list_res == exp
def test_range(self):
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_dict_iterators(self):
assert next(itervalues({1: 2})) == 2
assert next(iterkeys({1: 2})) == 1
assert next(iteritems({1: 2})) == (1, 2)
| bsd-3-clause |
davidsamu/seal | seal/io/export.py | 1 | 3040 | # -*- coding: utf-8 -*-
"""
Functions related to exporting data.
@author: David Samu
"""
import numpy as np
import pandas as pd
from seal.util import util, constants
def export_unit_list(UA, fname):
"""Export unit list and parameters into Excel table."""
unit_params = UA.unit_params()
writer = pd.ExcelWriter(fname)
util.write_table(unit_params, writer)
def export_unit_trial_selection(UA, fname):
"""Export unit and trial selection as Excel table."""
# Gather selection dataframe.
dselect = {}
for i, u in enumerate(UA.iter_thru(excl=True)):
iselect = u.get_utid()
iselect['unit included'] = int(not u.is_excluded())
inc_trs = u.inc_trials()
ftr, ltr = 0, 0
if len(inc_trs):
ftr, ltr = inc_trs.min()+1, inc_trs.max()+1
iselect['first included trial'] = ftr
iselect['last included trial'] = ltr
dselect[i] = iselect
# Sort table to help reading by recording.
SelectDF = pd.concat(dselect, axis=1).T
SelectDF.sort_values(constants.utid_names, inplace=True)
SelectDF.index = range(1, len(SelectDF.index)+1)
# Write out selection dataframe.
writer = pd.ExcelWriter(fname)
util.write_table(SelectDF, writer)
def export_decoding_data(UA, fname, rec, task, trs, uids, prd, nrate):
"""Export decoding data into .mat file."""
# Below inits rely on these params being the same across units, which is
# only true when exporting a single task of a single recording!
if uids is None:
uids = UA.uids([task])[rec]
u = UA.get_unit(uids[0], task)
t1s, t2s = u.pr_times(prd, trs, add_latency=False, concat=False)
prd_str = constants.tr_prds.loc[prd, 'start']
ref_ev = constants.tr_evts.loc[prd_str, 'rel to']
ref_ts = u.ev_times(ref_ev)
if nrate is None:
nrate = u.init_nrate()
# Trial params.
trpars = np.array([util.remove_dim_from_series(u.TrData[par][trs])
for par in u.TrData]).T
trpar_names = ['_'.join(col) if util.is_iterable(col) else col
for col in u.TrData.columns]
# Trial events.
tr_evts = u.Events
trevn_names = tr_evts.columns.tolist()
tr_evts = np.array([util.remove_dim_from_series(tr_evts.loc[trs, evn])
for evn in tr_evts]).T
# Rates.
rates = np.array([np.array(u._Rates[nrate].get_rates(trs, t1s, t2s))
for u in UA.iter_thru([task], uids)])
# Sampling times.
times = np.array(u._Rates[nrate].get_rates(trs, t1s, t2s, ref_ts).columns)
# Create dictionary to export.
export_dict = {'recording': rec, 'task': task,
'period': prd, 'nrate': nrate,
'trial_parameter_names': trpar_names,
'trial_parameters': trpars,
'trial_event_names': trevn_names,
'trial_events': tr_evts,
'times': times, 'rates': rates}
# Export data.
util.write_matlab_object(fname, export_dict)
| gpl-3.0 |
mattgiguere/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each value
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
noam09/deluge-telegramer | telegramer/include/future/utils/__init__.py | 8 | 20325 | """
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY35_PLUS = sys.version_info[0:2] >= (3, 5)
PY36_PLUS = sys.version_info[0:2] >= (3, 6)
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
Encodes to latin-1 (where the first 256 chars are the same as
ASCII.)
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
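# Illustrative examples (not part of the original module):
#
#     >>> isidentifier('spam')
#     True
#     >>> isidentifier('2bad')
#     False
#     >>> isidentifier('spam.eggs', dotted=True)
#     True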
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
myglobals, mylocals = _get_caller_globals_and_locals()
# We pass the exception and cause along with other globals
# when we exec():
myglobals = myglobals.copy()
myglobals['__python_future_raise_from_exc'] = exc
myglobals['__python_future_raise_from_cause'] = cause
execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here')? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
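# Illustrative usage sketch for the helpers above (not part of the original
# module): chain a new exception to the one currently being handled, in a way
# that works on both Python 2 and Python 3:
#
#     try:
#         {}['missing']
#     except KeyError as exc:
#         raise_from(ValueError('lookup failed'), exc)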
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
``__instancecheck__`` not overridden on the newbytes subclass. In
other words, it is REALLY a newbytes instance, not a Py2 native str
object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
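# Illustrative examples (not part of the original module): old_div reproduces
# Python 2's '/' semantics, i.e. floor division for two integers and true
# division otherwise:
#
#     >>> old_div(7, 2)
#     3
#     >>> old_div(7.0, 2)
#     3.5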
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
return obj
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
| gpl-3.0 |
ckuethe/gnuradio | gr-fec/python/fec/polar/channel_construction_bec.py | 22 | 8068 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
import helper_functions as hf
def bec_channel(eta):
'''
binary erasure channel (BEC)
for each y e Y
W(y|0) * W(y|1) = 0 or W(y|0) = W(y|1)
transistions are 1 -> 1 or 0 -> 0 or {0, 1} -> ? (erased symbol)
'''
# looks like BSC but should be interpreted differently.
w = np.array((1 - eta, eta, 1 - eta), dtype=float)
return w
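# Illustrative example (not part of the original module): for an erasure
# probability eta = 0.25 the returned transition weights are
#
#     >>> bec_channel(0.25)
#     array([0.75, 0.25, 0.75])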
def odd_rec(iwn):
return iwn ** 2
def even_rec(iwn):
return 2 * iwn - iwn ** 2
def calc_one_recursion(iw0):
iw1 = np.zeros(2 * len(iw0)) # double values
for i in range(len(iw0)):
# careful indices screw you because paper is '1' based :(
iw1[2 * i] = odd_rec(iw0[i])
iw1[2 * i + 1] = even_rec(iw0[i])
return iw1
def calculate_bec_channel_capacities_loop(initial_channel, block_power):
# compare [0, Arikan] eq. 6
iw = np.array([initial_channel, ], dtype=float)
for i in range(block_power):
iw = calc_one_recursion(iw)
return iw
def calc_vector_capacities_one_recursion(iw0):
degraded = odd_rec(iw0)
upgraded = even_rec(iw0)
iw1 = np.empty(2 * len(iw0), dtype=degraded.dtype)
iw1[0::2] = degraded
iw1[1::2] = upgraded
return iw1
def calculate_bec_channel_capacities_vector(initial_channel, block_power):
# compare [0, Arikan] eq. 6
# this version is ~ 180 times faster than the loop version with 2**22 synthetic channels
iw = np.array([initial_channel, ], dtype=float)
for i in range(block_power):
iw = calc_vector_capacities_one_recursion(iw)
return iw
def calculate_bec_channel_capacities(eta, block_size):
# compare [0, Arikan] eq. 6
iw = 1 - eta # holds for BEC as stated in paper
lw = hf.power_of_2_int(block_size)
return calculate_bec_channel_capacities_vector(iw, lw)
def calculate_z_parameters_one_recursion(z_params):
z_next = np.empty(2 * z_params.size, dtype=z_params.dtype)
z_sq = z_params ** 2
z_low = 2 * z_params - z_sq
z_next[0::2] = z_low
z_next[1::2] = z_sq
return z_next
def calculate_bec_channel_z_parameters(eta, block_size):
# compare [0, Arikan] eq. 38
block_power = hf.power_of_2_int(block_size)
z_params = np.array([eta, ], dtype=float)
    for _ in range(block_power):
z_params = calculate_z_parameters_one_recursion(z_params)
return z_params
def design_snr_to_bec_eta(design_snr):
# minimum design snr = -1.5917 corresponds to BER = 0.5
s = 10. ** (design_snr / 10.)
return np.exp(-s)
def bhattacharyya_bounds(design_snr, block_size):
'''
Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
In this paper it is called Bhattacharyya bounds channel construction and is abbreviated PCC-0
Best design SNR for block_size = 2048, R = 0.5, is 0dB.
    Compare with Arikan: 'Channel Polarization: A Method for Constructing Capacity-Achieving Codes for Symmetric Binary-Input Memoryless Channels'.
    Proposition 5: the inequalities turn into equalities for the BEC channel. Otherwise they represent an upper bound.
Also compare [0, Arikan] eq. 6 and 38
For BEC that translates to capacity(i) = 1 - bhattacharyya(i)
:return Z-parameters in natural bit-order. Choose according to desired rate.
'''
eta = design_snr_to_bec_eta(design_snr)
return calculate_bec_channel_z_parameters(eta, block_size)
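# Worked example (illustrative, not part of the original module): for a BEC
# with eta = 0.5 and block_size = 4, two recursions of
# calculate_z_parameters_one_recursion give
#
#     [0.5] -> [0.75, 0.25] -> [0.9375, 0.5625, 0.4375, 0.0625]
#
# and the corresponding capacities from calculate_bec_channel_capacities(0.5, 4)
# are [0.0625, 0.4375, 0.5625, 0.9375], i.e. capacity(i) = 1 - Z(i) for the
# BEC, as stated in the docstring above.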
def plot_channel_capacities(capacity, save_file=None):
block_size = len(capacity)
try:
import matplotlib.pyplot as plt
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
plt.plot(capacity)
plt.xlim([0, block_size])
plt.ylim([-0.01, 1.01])
plt.xlabel('synthetic channel number')
plt.ylabel('channel capacity')
# plt.title('BEC channel construction')
plt.grid()
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass # only plot in case matplotlib is installed
def plot_average_channel_distance(save_file=None):
eta = 0.5 # design_snr_to_bec_eta(-1.5917)
powers = np.arange(4, 26)
try:
import matplotlib.pyplot as plt
import matplotlib
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
dist = []
medians = []
initial_channel = 1 - eta
for p in powers:
bs = int(2 ** p)
capacities = calculate_bec_channel_capacities(eta, bs)
avg_capacity = np.repeat(initial_channel, len(capacities))
averages = np.abs(capacities - avg_capacity)
avg_distance = np.sum(averages) / float(len(capacities))
dist.append(avg_distance)
variance = np.std(averages)
medians.append(variance)
plt.errorbar(powers, dist, yerr=medians)
plt.grid()
plt.xlabel(r'block size $N$')
plt.ylabel(r'$\frac{1}{N} \sum_i |I(W_N^{(i)}) - 0.5|$')
axes = plt.axes()
tick_values = np.array(axes.get_xticks().tolist())
tick_labels = np.array(tick_values, dtype=int)
tick_labels = ['$2^{' + str(i) + '}$' for i in tick_labels]
plt.xticks(tick_values, tick_labels)
plt.xlim((powers[0], powers[-1]))
plt.ylim((0.2, 0.5001))
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass
def plot_capacity_histogram(design_snr, save_file=None):
eta = design_snr_to_bec_eta(design_snr)
# capacities = calculate_bec_channel_capacities(eta, block_size)
try:
import matplotlib.pyplot as plt
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
block_sizes = [32, 128, 512]
for b in block_sizes:
capacities = calculate_bec_channel_capacities(eta, b)
w = 1. / float(len(capacities))
weights = [w, ] * b
plt.hist(capacities, bins=b, weights=weights, range=(0.95, 1.0))
plt.grid()
plt.xlabel('synthetic channel capacity')
plt.ylabel('normalized item count')
print(plt.gcf().get_size_inches())
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass
def main():
    print('channel construction main')
n = 11
block_size = int(2 ** n)
design_snr = -1.59
eta = design_snr_to_bec_eta(design_snr)
# print(calculate_bec_channel_z_parameters(eta, block_size))
# capacity = calculate_bec_channel_capacities(eta, block_size)
# plot_average_channel_distance()
calculate_bec_channel_z_parameters(eta, block_size)
if __name__ == '__main__':
main()
| gpl-3.0 |
Crespo911/pyspace | pySPACE/missions/nodes/scikits_nodes.py | 1 | 29424 | # -*- coding:utf-8; -*-
""" Wrap the algorithms defined in `scikits.learn <http://scikit-learn.org/>`_ in pySPACE nodes
For details on parameter usage look at the
`scikits documentation <http://scikit-learn.org/>`_ or
the wrapped documentation of pySPACE: :ref:`scikit_nodes`.
The parameters given in the node specification are filtered to check whether
they are available and are then forwarded directly to the scikit algorithm.
This module is based heavily on the scikits.learn wrapper for the "Modular
toolkit for Data Processing"
(MDP, version 3.3, http://mdp-toolkit.sourceforge.net/).
All credit goes to the MDP authors.
MDP (version 3.3) is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = "restructuredtext en"
try:
import sklearn
_sklearn_prefix = 'sklearn'
except ImportError:
try:
import scikits.learn as sklearn
_sklearn_prefix = 'scikits.learn'
except ImportError:
_sklearn_prefix = False
import inspect
import re
import numpy
import logging
import warnings
import sys
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.missions.nodes import NODE_MAPPING, DEFAULT_NODE_MAPPING
from pySPACE.resources.data_types.prediction_vector import PredictionVector
from pySPACE.resources.data_types.feature_vector import FeatureVector
class ScikitsException(Exception):
"""Base class for exceptions in nodes wrapping scikits algorithms."""
pass
# import all submodules of sklearn (to work around lazy import)
def _version_too_old(version, known_good):
""" version comparison """
for part,expected in zip(version.split('.'), known_good):
try:
p = int(part)
except ValueError:
return None
if p < expected:
return True
if p > expected:
break
return False
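# Illustrative examples (not part of the original module):
#
#     >>> _version_too_old('0.8.1', (0, 9))
#     True
#     >>> _version_too_old('0.11', (0, 9))
#     False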
if not _sklearn_prefix:
scikits_modules = []
elif _version_too_old(sklearn.__version__, (0, 8)):
scikits_modules = ['ann', 'cluster', 'covariance', 'feature_extraction',
'feature_selection', 'features', 'gaussian_process', 'glm',
'linear_model', 'preprocessing', 'svm',
'pca', 'lda', 'hmm', 'fastica', 'grid_search', 'mixture',
'naive_bayes', 'neighbors', 'qda']
elif _version_too_old(sklearn.__version__, (0, 9)):
# package structure has been changed in 0.8
scikits_modules = ['svm', 'linear_model', 'naive_bayes', 'neighbors',
'mixture', 'hmm', 'cluster', 'decomposition', 'lda',
'covariance', 'cross_val', 'grid_search',
'feature_selection.rfe', 'feature_extraction.image',
'feature_extraction.text', 'pipelines', 'pls',
'gaussian_process', 'qda']
elif _version_too_old(sklearn.__version__, (0, 11)):
# from release 0.9 cross_val becomes cross_validation and hmm is deprecated
scikits_modules = ['svm', 'linear_model', 'naive_bayes', 'neighbors',
'mixture', 'cluster', 'decomposition', 'lda',
'covariance', 'cross_validation', 'grid_search',
'feature_selection.rfe', 'feature_extraction.image',
'feature_extraction.text', 'pipelines', 'pls',
'gaussian_process', 'qda', 'ensemble', 'manifold',
'metrics', 'preprocessing', 'tree']
else:
scikits_modules = ['svm', 'linear_model', 'naive_bayes', 'neighbors',
'mixture', 'cluster', 'decomposition', 'lda',
'covariance', 'cross_validation', 'grid_search',
'feature_selection', 'feature_extraction',
'pipeline', 'pls', 'gaussian_process', 'qda',
'ensemble', 'manifold', 'metrics', 'preprocessing',
'semi_supervised', 'tree', 'hmm']
for name in scikits_modules:
# not all modules may be available due to missing dependencies
# on the user system.
# we just ignore failing imports
try:
__import__(_sklearn_prefix + '.' + name)
except ImportError:
pass
_WS_LINE_RE = re.compile(r'^\s*$')
_WS_PREFIX_RE = re.compile(r'^(\s*)')
_HEADINGS_RE = re.compile(r'''^(Parameters|Attributes|Methods|Examples|Notes)\n
(----+|====+)''', re.M + re.X)
_UNDERLINE_RE = re.compile(r'----+|====+')
_VARWITHUNDER_RE = re.compile(r'(\s|^)([a-zA-Z_][a-zA-Z0-9_]*_)(\s|$|[,.])')
_HEADINGS = set(['Parameters', 'Attributes', 'Methods', 'Examples',
'Notes', 'References'])
_DOC_TEMPLATE = """
%s
This node has been automatically generated by wrapping the
`%s.%s <http://scikit-learn.org/stable/modules/generated/%s.%s.html>`_ class
from the ``sklearn`` library. The wrapped instance can be accessed
through the ``scikits_alg`` attribute.
%s
"""
def _gen_docstring(object, docsource=None):
""" Generate and modify the docstring for each wrapped node """
module = object.__module__
name = object.__name__
if hasattr(eval(".".join(module.split(".")[:-1])), name):
link_module = ".".join(module.split(".")[:-1])
else:
link_module = module
# search for documentation string
if docsource is None:
docsource = object
docstring = docsource.__doc__
if docstring is None:
docstring = object.__doc__
if docstring is None:
docstring = "This algorithm contains no documentation."
# # error search for getting docstring
# print object
# print module
# print object.__dict__
# print docsource
#warnings.warn("No documentation found for %s.%s" % (module, name))
#return None # old case
pass
lines = docstring.strip().split('\n')
for i, line in enumerate(lines):
if _WS_LINE_RE.match(line):
break
header = [line.strip() for line in lines[:i]]
therest = [line.rstrip() for line in lines[i + 1:]]
body = []
if therest:
prefix = min(len(_WS_PREFIX_RE.match(line).group(1))
for line in therest if line)
quoteind = None
for i, line in enumerate(therest):
line = line[prefix:]
if line in _HEADINGS:
body.append('**%s**' % line)
elif _UNDERLINE_RE.match(line):
body.append('')
else:
line = _VARWITHUNDER_RE.sub(r'\1``\2``\3', line)
if quoteind:
if len(_WS_PREFIX_RE.match(line).group(1)) >= quoteind:
line = quoteind * ' ' + '- ' + line[quoteind:]
else:
quoteind = None
body.append('')
body.append(line)
if line.endswith(':'):
body.append('')
if i + 1 < len(therest):
next = therest[i + 1][prefix:]
quoteind = len(_WS_PREFIX_RE.match(next).group(1))
return _DOC_TEMPLATE % ('\n'.join(header), module, name, link_module, name,
'\n'.join(body))
# TODO: generalize dtype support
# TODO: have a look at predict_proba for Classifier.prob
# TODO: inverse <-> generate/rvs
# TODO: deal with input_dim/output_dim
# TODO: change signature of overwritten functions
# TODO: wrap_scikits_instance
# TODO: add sklearn availability to test info strings
# TODO: which tests ? (test that particular algorithm are / are not trainable)
# XXX: if class defines n_components, allow output_dim, otherwise throw exception
# also for classifiers (overwrite _set_output_dim)
# Problem: sometimes they call it 'k' (e.g., algorithms in sklearn.cluster)
def apply_to_scikits_algorithms(current_module, action,
processed_modules=None,
processed_classes=None):
""" Function that traverses a module to find scikits algorithms.
'sklearn' algorithms are identified by the 'fit' 'predict',
or 'transform' methods. The 'action' function is applied to each found
algorithm.
action -- a function that is called with as ``action(class_)``, where
``class_`` is a class that defines the 'fit' or 'predict' method
"""
# only consider modules and classes once
if processed_modules is None:
processed_modules = []
if processed_classes is None:
processed_classes = []
if current_module in processed_modules:
return
processed_modules.append(current_module)
for member_name, member in current_module.__dict__.items():
if not member_name.startswith('_'):
# classes
if inspect.isclass(member) and member not in processed_classes:
if ((hasattr(member, 'fit')
or hasattr(member, 'predict')
or hasattr(member, 'transform'))
and not member.__module__.endswith('_')):
processed_classes.append(member)
action(member)
# other modules
elif (inspect.ismodule(member) and
member.__name__.startswith(_sklearn_prefix)):
apply_to_scikits_algorithms(member, action, processed_modules,
processed_classes)
return processed_classes
_OUTPUTDIM_ERROR = """'output_dim' keyword not supported.
Please set the output dimensionality using sklearn keyword
arguments (e.g., 'n_components', or 'k'). See the docstring of
this class for details."""
def wrap_scikits_classifier(scikits_class):
"""Wrap a sklearn classifier as a BaseNode subclass.
The wrapper maps these node methods to their sklearn equivalents:
- _stop_training -> fit
- _execute -> predict
"""
newaxis = numpy.newaxis
# create a wrapper class for a sklearn classifier
class ScikitsClassifier(BaseNode):
def __init__(self, input_dim=None, output_dim=None, dtype=None,
class_labels=None, **kwargs):
if output_dim is not None:
# output_dim and n_components cannot be defined at the same time
if 'n_components' in kwargs:
msg = ("Dimensionality set both by "
"output_dim=%d and n_components=%d""")
raise ScikitsException(msg % (output_dim,
kwargs['n_components']))
super(ScikitsClassifier, self).__init__(input_dim=input_dim,
output_dim=output_dim,
dtype=dtype)
try:
accepted_args = inspect.getargspec(scikits_class.__init__)[0]
for key in kwargs.keys():
if key not in accepted_args:
kwargs.pop(key)
except TypeError: # happens for GaussianNBSklearnNode
kwargs = {}
self.kwargs = kwargs
self.set_permanent_attributes(kwargs=kwargs,
scikits_alg=scikits_class(**self.kwargs),
data=[],
labels=[],
class_labels=class_labels)
# ---- re-direct training and execution to the wrapped algorithm
def _train(self, data, y):
x = data.view(numpy.ndarray)
self.data.append(x[0])
self.labels.append(y)
def _stop_training(self, **kwargs):
super(ScikitsClassifier, self)._stop_training(self)
if self.class_labels is None:
self.class_labels = sorted(list(set(self.labels)))
data = numpy.array(self.data)
label_values = \
numpy.array(map(lambda s: self.class_labels.index(s),
self.labels))
try:
return self.scikits_alg.fit(data, label_values, **kwargs)
except Exception as e:
raise type(e), \
type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]),\
sys.exc_info()[2]
def _execute(self, data):
x = data.view(numpy.ndarray)
try:
prediction = self.scikits_alg.predict(x)[0]
except Exception as e:
raise type(e), \
type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]), \
sys.exc_info()[2]
if hasattr(self.scikits_alg, "predict_proba"):
try:
score = self.scikits_alg.predict_proba(x)[0, 1]
except Exception as e:
warnings.warn("%s in node %s:\n\t"\
%(type(e).__name__,self.__class__.__name__)+e.args[0])
try:
score = self.scikits_alg.decision_function(x)[0]
except:
score = prediction
elif hasattr(self.scikits_alg, "decision_function"):
score = self.scikits_alg.decision_function(x)[0]
else:
score = prediction
label = self.class_labels[prediction]
return PredictionVector(label=label, prediction=score,
predictor=self)
# ---- administrative details
@staticmethod
def is_trainable():
"""Return True if the node can be trained, False otherwise."""
return hasattr(scikits_class, 'fit')
@staticmethod
def is_supervised():
"""Return True if the node requires labels for training, False otherwise."""
return True
# NOTE: at this point scikits nodes can only support up to
# 64-bits floats because some call numpy.linalg.svd, which for
# some reason does not support higher precisions
def _get_supported_dtypes(self):
"""Return the list of dtypes supported by this node.
The types can be specified in any format allowed by numpy.dtype."""
return ['float32', 'float64']
# modify class name and docstring
ScikitsClassifier.__name__ = scikits_class.__name__ + 'SklearnNode'
ScikitsClassifier.__doc__ = _gen_docstring(scikits_class)
# Class must be permanently accessible from module level
globals()[ScikitsClassifier.__name__] = ScikitsClassifier
# change the docstring of the methods to match the ones in sklearn
# methods_dict maps ScikitsNode method names to sklearn method names
methods_dict = {'__init__': '__init__',
'stop_training': 'fit',
'execute': 'predict'}
#if hasattr(scikits_class, 'predict_proba'):
# methods_dict['prob'] = 'predict_proba'
for pyspace_name, scikits_name in methods_dict.items():
pyspace_method = getattr(ScikitsClassifier, pyspace_name)
scikits_method = getattr(scikits_class, scikits_name)
if hasattr(scikits_method, 'im_func'):
# some scikits algorithms do not define an __init__ method
# the one inherited from 'object' is a
# "<slot wrapper '__init__' of 'object' objects>"
# which does not have a 'im_func' attribute
pyspace_method.im_func.__doc__ = _gen_docstring(scikits_class,
scikits_method.im_func)
if scikits_class.__init__.__doc__ is None:
ScikitsClassifier.__init__.im_func.__doc__ = _gen_docstring(scikits_class)
return ScikitsClassifier
def wrap_scikits_transformer(scikits_class):
""" Wrap a sklearn transformer as a pySPACE BaseNode subclass
The wrapper maps these pySPACE methods to their sklearn equivalents:
- _stop_training -> fit
- _execute -> transform
"""
# create a wrapper class for a sklearn transformer
class ScikitsTransformer(BaseNode):
def __init__(self, input_dim=None, output_dim=None, dtype=None, **kwargs):
if output_dim is not None:
raise ScikitsException(_OUTPUTDIM_ERROR)
super(ScikitsTransformer, self).__init__(input_dim=input_dim,
output_dim=output_dim,
dtype=dtype)
accepted_args = inspect.getargspec(scikits_class.__init__)[0]
for key in kwargs.keys():
if key not in accepted_args:
kwargs.pop(key)
self.kwargs = kwargs
self.set_permanent_attributes(kwargs=kwargs,
scikits_alg=scikits_class(**self.kwargs),
data=[],
feature_names=None)
# ---- re-direct training and execution to the wrapped algorithm
def _train(self, data):
assert type(data) == FeatureVector, \
"Scikits-Learn Transformer nodes only support FeatureVector inputs."
x = data.view(numpy.ndarray)
self.data.append(x[0])
def _stop_training(self, **kwargs):
super(ScikitsTransformer, self)._stop_training(self)
data = numpy.array(self.data)
return self.scikits_alg.fit(data, **kwargs)
def _execute(self, data):
x = data.view(numpy.ndarray)
out = self.scikits_alg.transform(x[0])
if self.feature_names is None:
self.feature_names = \
["%s_%s" % (self.__class__.__name__, i)
for i in range(out.shape[1])]
return FeatureVector(out, self.feature_names)
# ---- administrative details
@staticmethod
def is_trainable():
"""Return True if the node can be trained, False otherwise."""
return hasattr(scikits_class, 'fit')
@staticmethod
def is_supervised():
"""Return True if the node requires labels for training, False otherwise."""
return False
# NOTE: at this point scikits nodes can only support up to
# 64-bits floats because some call numpy.linalg.svd, which for
# some reason does not support higher precisions
def _get_supported_dtypes(self):
"""Return the list of dtypes supported by this node.
The types can be specified in any format allowed by numpy.dtype."""
return ['float32', 'float64']
# modify class name and docstring
ScikitsTransformer.__name__ = scikits_class.__name__ + 'SklearnNode'
ScikitsTransformer.__doc__ = _gen_docstring(scikits_class)
# Class must be permanently accessible from module level
globals()[ScikitsTransformer.__name__] = ScikitsTransformer
# change the docstring of the methods to match the ones in sklearn
# methods_dict maps ScikitsNode method names to sklearn method names
methods_dict = {'__init__': '__init__',
'stop_training': 'fit',
'execute': 'transform'}
for pyspace_name, scikits_name in methods_dict.items():
pyspace_method = getattr(ScikitsTransformer, pyspace_name)
scikits_method = getattr(scikits_class, scikits_name, None)
if hasattr(scikits_method, 'im_func'):
# some scikits algorithms do not define an __init__ method
# the one inherited from 'object' is a
# "<slot wrapper '__init__' of 'object' objects>"
# which does not have a 'im_func' attribute
pyspace_method.im_func.__doc__ = _gen_docstring(scikits_class,
scikits_method.im_func)
if scikits_class.__init__.__doc__ is None:
ScikitsTransformer.__init__.im_func.__doc__ = _gen_docstring(scikits_class)
return ScikitsTransformer
def wrap_scikits_predictor(scikits_class):
""" Wrap a sklearn predictor as an pySPACE BaseNode subclass
The wrapper maps these pySPACE methods to their sklearn equivalents:
* _stop_training -> fit
* _execute -> predict
"""
# create a wrapper class for a sklearn predictor
class ScikitsPredictor(BaseNode):
def __init__(self, input_dim=None, output_dim=None, dtype=None, **kwargs):
if output_dim is not None:
raise ScikitsException(_OUTPUTDIM_ERROR)
super(ScikitsPredictor, self).__init__(input_dim=input_dim,
output_dim=output_dim,
dtype=dtype)
accepted_args = inspect.getargspec(scikits_class.__init__)[0]
for key in kwargs.keys():
if key not in accepted_args:
kwargs.pop(key)
self.kwargs = kwargs
self.set_permanent_attributes(kwargs=kwargs,
data=[],
labels=[],
scikits_alg=scikits_class(**self.kwargs))
# ---- re-direct training and execution to the wrapped algorithm
def _train(self, data, y):
x = data.view(numpy.ndarray)
self.data.append(x[0])
self.labels.append(numpy.float64(y))
def _stop_training(self, **kwargs):
super(ScikitsPredictor, self)._stop_training(self)
data = numpy.array(self.data)
label_values = numpy.array(self.labels)
try:
return self.scikits_alg.fit(data, label_values, **kwargs)
except Exception as e:
raise type(e), \
type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]), \
sys.exc_info()[2]
def _execute(self, data):
x = data.view(numpy.ndarray)
try:
prediction = self.scikits_alg.predict(x)[0]
except Exception as e:
raise type(e), \
type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]), \
sys.exc_info()[2]
if hasattr(self.scikits_alg, "predict_proba"):
try:
score = self.scikits_alg.predict_proba(x)[0, 1]
except Exception as e:
warnings.warn("%s in node %s:\n\t" \
%(type(e).__name__,self.__class__.__name__)+e.args[0])
try:
score = self.scikits_alg.decision_function(x)[0]
except:
score = prediction
elif hasattr(self.scikits_alg, "decision_function"):
score = self.scikits_alg.decision_function(x)[0]
else:
# if nothing else works, we set the score of the
# prediction to be equal to the prediction itself.
score = prediction
return PredictionVector(label=prediction, prediction=score,
predictor=self)
# ---- administrative details
def is_trainable(self):
"""Return True if the node can be trained, False otherwise."""
return hasattr(scikits_class, 'fit')
# NOTE: at this point scikits nodes can only support up to 64-bits floats
# because some call numpy.linalg.svd, which for some reason does not
# support higher precisions
def _get_supported_dtypes(self):
"""Return the list of dtypes supported by this node.
The types can be specified in any format allowed by numpy.dtype."""
return ['float32', 'float64']
def is_supervised(self):
return self.is_trainable()
# modify class name and docstring
ScikitsPredictor.__name__ = scikits_class.__name__ + 'SklearnNode'
ScikitsPredictor.__doc__ = _gen_docstring(scikits_class)
# Class must be permanently accessible from module level
globals()[ScikitsPredictor.__name__] = ScikitsPredictor
# change the docstring of the methods to match the ones in sklearn
# methods_dict maps ScikitsPredictor method names to sklearn method names
methods_dict = {'__init__': '__init__',
'stop_training': 'fit',
'execute': 'predict'}
for pyspace_name, scikits_name in methods_dict.items():
pyspace_method = getattr(ScikitsPredictor, pyspace_name)
scikits_method = getattr(scikits_class, scikits_name)
if hasattr(scikits_method, 'im_func'):
# some scikits algorithms do not define an __init__ method
# the one inherited from 'object' is a
# "<slot wrapper '__init__' of 'object' objects>"
# which does not have a 'im_func' attribute
pyspace_method.im_func.__doc__ = _gen_docstring(scikits_class,
scikits_method.im_func)
if scikits_class.__init__.__doc__ is None:
ScikitsPredictor.__init__.im_func.__doc__ = _gen_docstring(scikits_class)
return ScikitsPredictor
#list candidate nodes
def print_public_members(class_):
""" Print methods of sklearn algorithm """
print '\n', '-' * 15
print '%s (%s)' % (class_.__name__, class_.__module__)
for attr_name in dir(class_):
attr = getattr(class_, attr_name)
#print attr_name, type(attr)
if not attr_name.startswith('_') and inspect.ismethod(attr):
print ' -', attr_name
#apply_to_scikits_algorithms(sklearn, print_public_members)
def wrap_scikits_algorithms(scikits_class, nodes_list):
""" Check *scikits_class* and append new wrapped class to *nodes_list*
Currently only classifiers subclassing ``sklearn.base.ClassifierMixin``
    and having a *fit* method have been integrated and tested.
Algorithms with the *transform* function are also available.
*predict* nodes will be available soon but require more testing especially
of regression in pySPACE.
"""
name = scikits_class.__name__
if (name[:4] == 'Base' or name == 'LinearModel'
or name.startswith('EllipticEnvelop')
or name.startswith('ForestClassifier')):
return
if issubclass(scikits_class, sklearn.base.ClassifierMixin) and \
hasattr(scikits_class, 'fit'):
nodes_list.append(wrap_scikits_classifier(scikits_class))
# Some (abstract) transformers do not implement fit.
elif hasattr(scikits_class, 'transform') and hasattr(scikits_class, 'fit'):
nodes_list.append(wrap_scikits_transformer(scikits_class))
elif hasattr(scikits_class, 'predict') and hasattr(scikits_class, 'fit'):
# WARNING: THIS PART OF PYSPACE IS EXPERIMENTAL ONLY
nodes_list.append(wrap_scikits_predictor(scikits_class))
if _sklearn_prefix:
scikits_nodes = []
apply_to_scikits_algorithms(sklearn,
lambda c: wrap_scikits_algorithms(
c, scikits_nodes))
# add scikits nodes to dictionary
#scikits_module = new.module('scikits')
for wrapped_c in scikits_nodes:
DEFAULT_NODE_MAPPING[wrapped_c.__name__] = wrapped_c
NODE_MAPPING[wrapped_c.__name__] = wrapped_c
NODE_MAPPING[wrapped_c.__name__[:-4]] = wrapped_c
del(wrapped_c) | gpl-3.0 |
jgdwyer/nn-convection | sknn_jgd/nn.py | 3 | 26071 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
__all__ = ['Regressor', 'Classifier', 'Layer', 'Convolution']
import os
import sys
import time
import logging
import itertools
import collections
log = logging.getLogger('sknn')
import numpy
import theano
class ansi:
BOLD = '\033[1;97m'
WHITE = '\033[0;97m'
YELLOW = '\033[0;33m'
RED = '\033[0;31m'
GREEN = '\033[0;32m'
BLUE = '\033[0;94m'
ENDC = '\033[0m'
class Layer(object):
"""
Specification for a layer to be passed to the neural network during construction. This
includes a variety of parameters to configure each layer based on its activation type.
Parameters
----------
type: str
Select which activation function this layer should use, as a string. Specifically,
options are ``Rectifier``, ``Sigmoid``, ``Tanh``, and ``ExpLin`` for non-linear layers
and ``Linear`` or ``Softmax`` for output layers.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
units: int
The number of units (also known as neurons) in this layer. This applies to all
layer types except for convolution.
weight_decay: float, optional
The coefficient for L1 or L2 regularization of the weights. For example, a value of
0.0001 is multiplied by the L1 or L2 weight decay equation.
dropout: float, optional
The ratio of inputs to drop out for this layer during training. For example, 0.25
means that 25% of the inputs will be excluded for each training sample, with the
remaining inputs being renormalized accordingly.
normalize: str, optional
Enable normalization of this layer. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
frozen: bool, optional
Specify whether to freeze a layer's parameters so they are not adjusted during the
training. This is useful when relying on pre-trained neural networks.
warning: None
You should use keyword arguments after `type` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
type,
warning=None,
name=None,
units=None,
weight_decay=None,
dropout=None,
normalize=None,
frozen=False):
assert warning is None,\
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['Rectifier', 'Sigmoid', 'Tanh', 'Linear', 'Softmax', 'Gaussian', 'ExpLin']:
raise NotImplementedError("Layer type `%s` is not implemented." % type)
self.name = name
self.type = type
self.units = units
self.weight_decay = weight_decay
self.dropout = dropout
self.normalize = normalize
self.frozen = frozen
def set_params(self, **params):
"""Setter for internal variables that's compatible with ``scikit-learn``.
"""
for k, v in params.items():
if k not in self.__dict__:
raise ValueError("Invalid parameter `%s` for layer `%s`." % (k, self.name))
self.__dict__[k] = v
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
copy = self.__dict__.copy()
del copy['type']
params = ", ".join(["%s=%r" % (k, v) for k, v in copy.items() if v is not None])
return "<sknn.nn.%s `%s`: %s>" % (self.__class__.__name__, self.type, params)
class Native(object):
"""Special type of layer that is handled directly to the backend (e.g. Lasagne). This
can be used to construct more advanced networks that are not yet supported by the
default interface.
Note that using this as a layer type means your code may not be compatible with future
revisions or other backends, and that serialization may be affected.
Parameters
----------
constructor: class or callable
The layer type usable directly by the backend (e.g. Lasagne). This can also
be a callable function that acts as a layer constructor.
*args: list of arguments
All positional arguments are passed directly to the constructor when the
neural network is initialized.
**kwargs: dictionary of named arguments
All named arguments are passed to the constructor directly also, with the exception
of the parameters ``name``, ``units``, ``frozen``, ``weight_decay``, ``normalize``
which take on the same role as in :class:`sknn.nn.Layer`.
"""
def __init__(self, constructor, *args, **keywords):
for attr in ['name', 'units', 'frozen', 'weight_decay', 'normalize']:
setattr(self, attr, keywords.pop(attr, None))
self.type = constructor
self.args = args
self.keywords = keywords
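    # Hedged usage sketch (added comment): a Native layer forwards its constructor
    # directly to the backend.  The Lasagne layer type and keyword arguments below
    # are assumptions that depend on the backend actually installed:
    #
    #     hidden = Native(lasagne.layers.DenseLayer, num_units=64, name="native0")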
class Convolution(Layer):
"""
Specification for a convolution layer to be passed to the neural network in construction.
This includes a variety of convolution-specific parameters to configure each layer, as well
as activation-specific parameters.
Parameters
----------
type: str
Select which activation function this convolution layer should use, as a string.
For hidden layers, you can use the following convolution types ``Rectifier``,
``ExpLin``, ``Sigmoid``, ``Tanh`` or ``Linear``.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
channels: int
Number of output channels for the convolution layers. Each channel has its own
set of shared weights which are trained by applying the kernel over the image.
kernel_shape: tuple of ints
A two-dimensional tuple of integers corresponding to the shape of the kernel when
convolution is used. For example, this could be a square kernel `(3,3)` or a full
horizontal or vertical kernel on the input matrix, e.g. `(N,1)` or `(1,N)`.
kernel_stride: tuple of ints, optional
A two-dimensional tuple of integers that represents the steps taken by the kernel
through the input image. By default, this is set to `(1,1)` and can be
customized separately to pooling.
border_mode: str
        String indicating the way borders in the image should be processed, one of the following options:
* `valid` — Only pixels from input where the kernel fits within bounds are processed.
* `full` — All pixels from input are processed, and the boundaries are zero-padded.
* `same` — The output resolution is set to the exact same as the input.
The size of the output will depend on this mode, for `full` it's identical to the input,
but for `valid` (default) it will be smaller or equal.
pool_shape: tuple of ints, optional
A two-dimensional tuple of integers corresponding to the pool size for downsampling.
This should be square, for example `(2,2)` to reduce the size by half, or `(4,4)` to make
the output a quarter of the original.
Pooling is applied after the convolution and calculation of its activation.
pool_type: str, optional
Type of the pooling to be used; can be either `max` or `mean`. If a `pool_shape` is
specified the default is to take the maximum value of all inputs that fall into this
pool. Otherwise, the default is None and no pooling is used for performance.
scale_factor: tuple of ints, optional
        A two-dimensional tuple of integers corresponding to the upscaling ratio. This should be
square, for example `(2,2)` to increase the size by double, or `(4,4)` to make the
output four times the original.
Upscaling is applied before the convolution and calculation of its activation.
weight_decay: float, optional
The coefficient for L1 or L2 regularization of the weights. For example, a value of
0.0001 is multiplied by the L1 or L2 weight decay equation.
dropout: float, optional
The ratio of inputs to drop out for this layer during training. For example, 0.25
means that 25% of the inputs will be excluded for each training sample, with the
remaining inputs being renormalized accordingly.
normalize: str, optional
Enable normalization of this layer. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
frozen: bool, optional
Specify whether to freeze a layer's parameters so they are not adjusted during the
training. This is useful when relying on pre-trained neural networks.
warning: None
You should use keyword arguments after `type` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
type,
warning=None,
name=None,
channels=None,
kernel_shape=None,
kernel_stride=None,
border_mode='valid',
pool_shape=None,
pool_type=None,
scale_factor=None,
weight_decay=None,
dropout=None,
normalize=None,
frozen=False):
assert warning is None,\
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['Rectifier', 'Sigmoid', 'Tanh', 'Linear', 'ExpLin']:
raise NotImplementedError("Convolution type `%s` is not implemented." % (type,))
if border_mode not in ['valid', 'full', 'same']:
raise NotImplementedError("Convolution border_mode `%s` is not implemented." % (border_mode,))
super(Convolution, self).__init__(
type,
name=name,
weight_decay=weight_decay,
dropout=dropout,
normalize=normalize,
frozen=frozen)
self.channels = channels
self.kernel_shape = kernel_shape
self.kernel_stride = kernel_stride or (1,1)
self.border_mode = border_mode
self.pool_shape = pool_shape or (1,1)
self.pool_type = pool_type or ('max' if pool_shape else None)
self.scale_factor = scale_factor or (1,1)
class NeuralNetwork(object):
"""
Abstract base class for wrapping all neural network functionality from PyLearn2,
common to multi-layer perceptrons in :mod:`sknn.mlp` and auto-encoders in
in :mod:`sknn.ae`.
Parameters
----------
layers: list of Layer
        An iterable sequence of layers, each specified as a :class:`sknn.mlp.Layer` instance that
        contains its type, optional name, and any parameters required.
* For hidden layers, you can use the following layer types:
``Rectifier``, ``ExpLin``, ``Sigmoid``, ``Tanh``, or ``Convolution``.
* For output layers, you can use the following layer types:
``Linear`` or ``Softmax``.
It's possible to mix and match any of the layer types, though most often
you should probably use hidden and output types as recommended here. Typically,
the last entry in this ``layers`` list should contain ``Linear`` for regression,
or ``Softmax`` for classification.
random_state: int, optional
Seed for the initialization of the neural network parameters (e.g.
weights and biases). This is fully deterministic.
parameters: list of tuple of array-like, optional
A list of ``(weights, biases)`` tuples to be reloaded for each layer, in the same
order as ``layers`` was specified. Useful for initializing with pre-trained
networks.
learning_rule: str, optional
Name of the learning rule used during stochastic gradient descent,
one of ``sgd``, ``momentum``, ``nesterov``, ``adadelta``, ``adagrad`` or
``rmsprop`` at the moment. The default is vanilla ``sgd``.
learning_rate: float, optional
Real number indicating the default/starting rate of adjustment for
the weights during gradient descent. Different learning rules may
take this into account differently. Default is ``0.01``.
learning_momentum: float, optional
Real number indicating the momentum factor to be used for the
learning rule 'momentum'. Default is ``0.9``.
batch_size: int, optional
Number of training samples to group together when performing stochastic
gradient descent (technically, a "minibatch"). By default each sample is
treated on its own, with ``batch_size=1``. Larger batches are usually faster.
n_iter: int, optional
The number of iterations of gradient descent to perform on the
neural network's weights when training with ``fit()``.
n_stable: int, optional
        Number of iterations after which training should return when the validation
error remains (near) constant. This is usually a sign that the data has been
fitted, or that optimization may have stalled. If no validation set is specified,
then stability is judged based on the training error. Default is ``10``.
f_stable: float, optional
Threshold under which the validation error change is assumed to be stable, to
be used in combination with `n_stable`. This is calculated as a relative ratio
of improvement, so if the results are only 0.1% better training is considered
stable. The training set is used as fallback if there's no validation set. Default
        is ``0.001``.
valid_set: tuple of array-like, optional
Validation set (X_v, y_v) to be used explicitly while training. Both
        arrays should have the same size for the first dimension, and the second
        dimension should match the training data specified in ``fit()``.
valid_size: float, optional
Ratio of the training data to be used for validation. 0.0 means no
validation, and 1.0 would mean there's no training data! Common values are
0.1 or 0.25.
normalize: string, optional
Enable normalization for all layers. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
regularize: string, optional
Which regularization technique to use on the weights, for example ``L2`` (most
common) or ``L1`` (quite rare), as well as ``dropout``. By default, there's no
regularization, unless another parameter implies it should be enabled, e.g. if
``weight_decay`` or ``dropout_rate`` are specified.
weight_decay: float, optional
The coefficient used to multiply either ``L1`` or ``L2`` equations when computing
the weight decay for regularization. If ``regularize`` is specified, this defaults
to 0.0001.
dropout_rate: float, optional
What rate to use for drop-out training in the inputs (jittering) and the
hidden layers, for each training example. Specify this as a ratio of inputs
to be randomly excluded during training, e.g. 0.75 means only 25% of inputs
will be included in the training.
loss_type: string, optional
        The cost function to use when training the network. There are three valid options:
* ``mse`` — Use mean squared error, for learning to predict the mean of the data.
* ``mae`` — Use mean average error, for learning to predict the median of the data.
* ``mcc`` — Use mean categorical cross-entropy, particularly for classifiers.
The default option is ``mse`` for regressors and ``mcc`` for classifiers, but ``mae`` can
only be applied to layers of type ``Linear`` or ``Gaussian`` and they must be used as
the output layer (PyLearn2 only).
callback: callable or dict, optional
An observer mechanism that exposes information about the inner training loop. This is
either a single function that takes ``cbs(event, **variables)`` as a parameter, or a
dictionary of functions indexed by on `event` string that conforms to ``cb(**variables)``.
There are multiple events sent from the inner training loop:
* ``on_train_start`` — Called when the main training function is entered.
* ``on_epoch_start`` — Called the first thing when a new iteration starts.
* ``on_batch_start`` — Called before an individual batch is processed.
* ``on_batch_finish`` — Called after that individual batch is processed.
        * ``on_epoch_finish`` — Called last when the iteration is done.
* ``on_train_finish`` — Called just before the training function exits.
For each function, the ``variables`` dictionary passed contains all local variables within
the training implementation.
debug: bool, optional
Should the underlying training algorithms perform validation on the data
as it's optimizing the model? This makes things slower, but errors can
be caught more effectively. Default is off.
verbose: bool, optional
How to initialize the logging to display the results during training. If there is
already a logger initialized, either ``sknn`` or the root logger, then this function
does nothing. Otherwise:
* ``False`` — Setup new logger that shows only warnings and errors.
* ``True`` — Setup a new logger that displays all debug messages.
* ``None`` — Don't setup a new logger under any condition (default).
Using the built-in python ``logging`` module, you can control the detail and style of
output by customising the verbosity level and formatter for ``sknn`` logger.
warning: None
You should use keyword arguments after `layers` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
layers,
warning=None,
parameters=None,
random_state=None,
learning_rule='sgd',
learning_rate=0.01,
learning_momentum=0.9,
normalize=None,
regularize=None,
weight_decay=None,
dropout_rate=None,
batch_size=1,
n_iter=None,
n_stable=10,
f_stable=0.001,
valid_set=None,
valid_size=0.0,
loss_type=None,
callback=None,
debug=False,
verbose=None,
**params):
assert warning is None,\
"Specify network parameters as keyword arguments, not positional arguments."
self.layers = []
for i, layer in enumerate(layers):
assert isinstance(layer, Layer) or isinstance(layer, Native),\
"Specify each layer as an instance of a `sknn.mlp.Layer` object."
# Layer names are optional, if not specified then generate one.
if layer.name is None:
layer.name = ("hidden%i" % i) if i < len(layers)-1 else "output"
# sklearn may pass layers in as additional named parameters, remove them.
if layer.name in params:
del params[layer.name]
self.layers.append(layer)
# Don't support any additional parameters that are not in the constructor.
# These are specified only so `get_params()` can return named layers, for double-
# underscore syntax to work.
assert len(params) == 0,\
"The specified additional parameters are unknown: %s." % ','.join(params.keys())
# Basic checking of the freeform string options.
assert regularize in (None, 'L1', 'L2', 'dropout'),\
"Unknown type of regularization specified: %s." % regularize
assert loss_type in ('mse', 'mae', 'mcc', None),\
"Unknown loss function type specified: %s." % loss_type
self.weights = parameters
self.random_state = random_state
self.learning_rule = learning_rule
self.learning_rate = learning_rate
self.learning_momentum = learning_momentum
self.normalize = normalize
self.regularize = regularize or ('dropout' if dropout_rate else None)\
or ('L2' if weight_decay else None)
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.n_stable = n_stable
self.f_stable = f_stable
self.valid_set = valid_set
self.valid_size = valid_size
self.loss_type = loss_type
self.debug = debug
self.verbose = verbose
self.callback = callback
self.auto_enabled = {}
self._backend = None
self._create_logger()
self._setup()
def _setup(self):
raise NotImplementedError("NeuralNetwork is an abstract class; "
"use the mlp.Classifier or mlp.Regressor instead.")
@property
def is_initialized(self):
"""Check if the neural network was setup already.
"""
return self._backend is not None and self._backend.is_initialized
def is_convolution(self, input=None, output=False):
"""Check whether this neural network includes convolution layers in the first
or last position.
Parameters
----------
input : boolean, optional
Whether the first layer should be checked for convolution. Default True.
output : boolean, optional
Whether the last layer should be checked for convolution. Default False.
Returns
-------
is_conv : boolean
True if either of the specified layers are indeed convolution, False otherwise.
"""
check_output = output
check_input = False if check_output and input is None else True
i = check_input and isinstance(self.layers[0], Convolution)
o = check_output and isinstance(self.layers[-1], Convolution)
return i or o
@property
def is_classifier(self):
"""Is this neural network instanced as a classifier or regressor?"""
return False
def _create_logger(self):
# If users have configured logging already, assume they know best.
if len(log.handlers) > 0 or len(log.parent.handlers) > 0 or self.verbose is None:
return
# Otherwise setup a default handler and formatter based on verbosity.
lvl = logging.DEBUG if self.verbose else logging.WARNING
fmt = logging.Formatter("%(message)s")
hnd = logging.StreamHandler(stream=sys.stdout)
hnd.setFormatter(fmt)
hnd.setLevel(lvl)
log.addHandler(hnd)
log.setLevel(lvl)
def get_parameters(self):
"""Extract the neural networks weights and biases layer by layer. Only valid
once the neural network has been initialized, for example via `fit()` function.
Returns
-------
params : list of tuples
For each layer in the order they are passed to the constructor, a named-tuple
of three items `weights`, `biases` (both numpy arrays) and `name` (string)
in that order.
"""
assert self._backend is not None,\
"Backend was not initialized; could not retrieve network parameters."
P = collections.namedtuple('Parameters', 'weights biases layer')
return [P(w, b, s.name) for s, (w, b) in zip(self.layers, self._backend._mlp_to_array())]
def set_parameters(self, storage):
"""Store the given weighs and biases into the neural network. If the neural network
has not been initialized, use the `weights` list as construction parameter instead.
Otherwise if the neural network is initialized, this function will extract the parameters
from the input list or dictionary and store them accordingly.
Parameters
----------
storage : list of tuples, or dictionary of tuples
Either a list of tuples for each layer, storing two items `weights` and `biases` in
the exact same order as construction. Alternatively, if this is a dictionary, a string
to tuple mapping for each layer also storing `weights` and `biases` but not necessarily
for all layers.
"""
# In case the class is not initialized, store the parameters for later during _initialize.
if self._backend is None:
self.weights = storage
return
if isinstance(storage, dict):
layers = [storage.get(l.name, None) for l in self.layers]
else:
layers = storage
return self._backend._array_to_mlp(layers, self._backend.mlp)
| apache-2.0 |
sfletc/scram2_plot | scram_plot/compare_plot.py | 1 | 7867 | from pylab import * # @UnusedWildImport
import matplotlib.pyplot as plt # @Reimport
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.io import output_notebook
from bokeh.models import HoverTool
from collections import OrderedDict
import csv
import profile_plot as pp
import math
import os.path
import sys
def compare_plot(file_prefix, nt_list, seq1, seq2, plot_type, browser, save_plot, pub, fig_size, xylim):
"""
Compare plot
:param file_prefix: path/to/file prefix
:param nt_list: read length list to plot
:param seq1: x seq name
:param seq2: y seq name
:param plot_type: plot type
:param browser: output to html
:param save_plot: bool to save plot
:param pub: bool to remove axes and legend
:param fig_size: figure dimensions
    :param xylim: x/y axis limit
"""
#try:
if nt_list[0]=="mir":
fname = file_prefix + "_miR.csv"
if os.path.isfile(fname):
try:
_format_compare_data(fname, "mir", browser, plot_type, pub, save_plot,
seq1, seq2, fig_size, xylim)
except:
print("\nCannot load and process {}".format(fname))
sys.exit()
else:
print("\n{} does not exist at this location".format(fname))
sys.exit()
else:
for nt in nt_list:
fname = "{0}_{1}.csv".format(file_prefix, nt)
if os.path.isfile(fname):
try:
_format_compare_data(fname, int(nt), browser, plot_type, pub, save_plot,
seq1, seq2, fig_size, xylim)
except:
print("\nCannot load and process {}".format(fname))
sys.exit()
else:
print("\n{} does not exist at this location".format(fname))
sys.exit()
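# Hypothetical invocation sketch (added comment; not in the original module).  The
# file prefix, read lengths and sequence names below are assumptions chosen only to
# show the expected argument layout:
#
#     compare_plot("alignments/sample", ["21", "22", "24"], "treated", "control",
#                  plot_type="all", browser=False, save_plot=True, pub=False,
#                  fig_size=4, xylim="auto")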
def _format_compare_data(file_name, nt, browser, plot_type, pub, save_plot, seq1, seq2, fig_size, xylim):
if "/" in file_name:
file_path = file_name.rsplit('/', 1)[0]
else:
file_path="."
if browser:
output_file(file_path + '/{0}_{1}_{2}.html'.format(seq1, seq2, nt))
else:
output_notebook(hide_banner=True)
first_line = True
x_vals_line = []
x_vals_point = []
xerr = []
max_x = 0.0
y_vals_line = []
y_vals_point = []
header = []
yerr = []
max_y = 0.0
log_max, max_x = _extract_alignment_data(file_name, first_line, header, max_x, max_y, x_vals_line, x_vals_point,
xerr, xylim, y_vals_line, y_vals_point, yerr)
# Interactive
    # Hack: bokeh tooltip text wrapping for large x values is not working properly
for plot_point in range(len(header)):
if math.log10(x_vals_point[plot_point]+0.0001)> 0.35*math.log10(max_x):
header[plot_point]=header[plot_point][:40]
_plot_type(fig_size, file_path, header, log_max, nt, plot_type, pub, save_plot, seq1, seq2, x_vals_line,
x_vals_point, xerr, y_vals_line, y_vals_point, yerr)
def _plot_type(fig_size, file_path, header, log_max, nt, plot_type, pub, save_plot, seq1, seq2, x_vals_line,
x_vals_point, xerr, y_vals_line, y_vals_point, yerr):
if plot_type == "log" or plot_type == "all":
_plot_compare_plot(file_path, header, log_max, nt, seq1, seq2, [], x_vals_point, [], y_vals_point, [], [],
save_plot,
pub, fig_size)
if plot_type == "log_error" or plot_type == "all":
_plot_compare_plot(file_path, header, log_max, nt, seq1, seq2, x_vals_line, x_vals_point, y_vals_line,
y_vals_point, xerr, yerr, save_plot, pub, fig_size)
def _extract_alignment_data(file_name, first_line, header, max_x, max_y, x_vals_line, x_vals_point, xerr, xylim,
y_vals_line, y_vals_point, yerr):
with open(file_name) as csvfile:
line_reader = csv.reader(csvfile)
for line in line_reader:
if first_line:
first_line = False
else:
# calc max value
if float(line[-4]) > max_x:
max_x = float(line[-4])
if float(line[-2]) > max_y:
max_y = float(line[-2])
# line
line[0] = line[0].strip()
x_se = [float(line[-4]) - float(line[-3]), float(line[-4]) + float(line[-3])]
y_se = [float(line[-2]) - float(line[-1]), float(line[-2]) + float(line[-1])]
xerr.append(float(line[-3]))
x_vals_line.append(x_se)
y_vals_line.append([float(line[-2]), float(line[-2])])
x_vals_line.append([float(line[-4]), float(line[-4])])
y_vals_line.append(y_se)
yerr.append(float(line[-1]))
# point
x_vals_point.append(float(line[-4]))
y_vals_point.append(float(line[-2]))
header.append(line[0])
if xylim == "auto":
_max = max([max_x, max_y]) # sets up max x and y scale values
log_max = _max + _max / 2
else:
log_max = int(xylim)
csvfile.close()
return log_max, max_x
def _plot_compare_plot(file_path, header, log_max, nt, seq1, seq2, x_vals_line, x_vals_point, y_vals_line, y_vals_point,
xerr, yerr, save_plot, pub_plot, fig_size = 4):
# Std Error bars
hover = HoverTool(
tooltips=[
("(x,y)", "($x, $y)"),
("header", "@Desc")
],
names=["circle", ]
)
p = figure(plot_width=600, plot_height=600,
x_axis_type="log", y_axis_type="log",
x_range=(0.1, log_max), y_range=(0.1, log_max),
toolbar_location="above", tools=[hover, 'save', 'box_zoom', 'reset'])
source_point = ColumnDataSource(data=OrderedDict(x=x_vals_point,
y=y_vals_point, Desc=header, )
)
if nt == "mir":
p.circle('x', 'y', name="circle", source=source_point, size=3, color=pp._nt_colour(nt),
legend="microRNA")
else:
p.circle('x', 'y', name="circle", source=source_point, size=3, color=pp._nt_colour(nt), legend="{0} nt".format(
nt))
p.legend.location = "top_left"
p.line([0.1, log_max], [0.1, log_max])
if xerr != []:
p.multi_line(xs=x_vals_line, ys=y_vals_line, color=pp._nt_colour(nt), alpha=0.5, )
p.xaxis.axis_label = seq1
p.yaxis.axis_label = seq2
show(p)
if save_plot:
_compare_plot_to_file(fig_size, file_path, log_max, nt, pub_plot, seq1, seq2, x_vals_point, xerr, y_vals_point, yerr)
def _compare_plot_to_file(fig_size, file_path, log_max, nt, pub_plot, seq1, seq2, x_vals_point, xerr, y_vals_point, yerr):
fig = plt.figure(figsize=(fig_size, fig_size))
if xerr != []:
plt.errorbar(x_vals_point, y_vals_point, xerr=xerr, yerr=yerr, capsize=0, ls='none', color=pp._nt_colour(
nt),
elinewidth=0.5)
plt.plot([0.1, log_max], [0.1, log_max], alpha=0.9, linewidth=1)
if nt == "mir":
plt.scatter(x_vals_point, y_vals_point, color=pp._nt_colour(nt), s=3, label="microRNA".format(nt))
else:
plt.scatter(x_vals_point, y_vals_point, color=pp._nt_colour(nt), s=3, label="{0} nt".format(nt))
plt.xlim([0.1, log_max])
plt.ylim([0.1, log_max])
plt.xscale('log')
plt.yscale('log')
if pub_plot:
pp._pub_plot()
plt.minorticks_off()
else:
plt.grid(linestyle='-', alpha=0.2)
plt.xlabel(seq1)
plt.ylabel(seq2)
plt.legend()
plt.savefig(file_path + '/{0}_{1}_{2}.png'.format(seq1, seq2, nt), dpi=300)
| mit |
DGrady/pandas | pandas/core/reshape/util.py | 20 | 1915 | import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
return haystack.get_indexer(needles)
def cartesian_product(X):
"""
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
Parameters
----------
X : list-like of list-likes
Returns
-------
product : list of ndarrays
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
See also
--------
itertools.product : Cartesian product of input iterables. Equivalent to
nested for-loops.
pandas.compat.product : An alias for itertools.product.
"""
msg = "Input must be a list-like of list-likes"
if not is_list_like(X):
raise TypeError(msg)
for x in X:
if not is_list_like(x):
raise TypeError(msg)
if len(X) == 0:
return []
lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
cumprodX = np.cumproduct(lenX)
a = np.roll(cumprodX, 1)
a[0] = 1
if cumprodX[-1] != 0:
b = cumprodX[-1] / cumprodX
else:
# if any factor is empty, the cartesian product is empty
b = np.zeros_like(cumprodX)
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
for i, x in enumerate(X)]
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
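# Worked example for the helpers above (added for clarity; not in the original file).
# compose() folds the callables with _compose2, so they are applied right-to-left:
#
#     >>> add_one = lambda x: x + 1
#     >>> double = lambda x: x * 2
#     >>> compose(add_one, double)(3)    # add_one(double(3))
#     7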
| bsd-3-clause |
jamesmishra/nlp-playground | nlp_playground/scripts/reviews_lda.py | 1 | 1707 | """Run LDA on Amazon reviews."""
import itertools
import click
from gensim.models.ldamulticore import LdaMulticore as LDA
from nlp_playground.data import amazon_book_reviews
from nlp_playground.exceptions import BadValidation
from nlp_playground.lib.data import groups_of_minimum_size
from nlp_playground.lib.gensim.corpora import \
corpus_from_documents_sklearn as corpus_from_documents
from nlp_playground.lib.gensim.lda import print_topics
@click.command()
@click.option(
"--num-topics",
default=10,
help="Number of LDA topics to generate.")
@click.option(
"--iterations",
default=1,
help="Number of iterations to run LDA for.")
@click.option(
"--num-documents",
default=10,
help="Number of reviews to read. 0 = all of them."
)
@click.option(
"--group-size",
default=1,
help="Group by Amazon product and require >= N reviews."
)
def main(num_topics, iterations, num_documents, group_size):
"""Run LDA on Amazon reviews."""
if group_size > num_documents and num_documents != 0:
raise BadValidation("Group size is larger than number of documents")
if num_documents == 0:
reviews = amazon_book_reviews()
else:
reviews = amazon_book_reviews(limit=num_documents)
groups = groups_of_minimum_size(reviews, 'asin', group_size)
print("Identified", len(groups), "groups of size", group_size)
flattened = itertools.chain(*groups.values())
review_texts = (x['reviewText'] for x in flattened)
corpus, dictionary = corpus_from_documents(review_texts)
lda = LDA(
corpus,
num_topics=num_topics,
workers=3,
id2word=dictionary,
iterations=iterations
)
print_topics(lda)
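# Hypothetical command-line invocation (added comment).  It assumes the command is
# exposed through a console-script entry point named "reviews-lda"; that name and the
# option values are illustrative assumptions, not taken from the original repository:
#
#     reviews-lda --num-topics 20 --iterations 5 --num-documents 1000 --group-size 5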
| mit |
JackKelly/neuralnilm_prototype | scripts/e134.py | 2 | 6154 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
"""
def set_save_plot_interval(net, epoch):
net.save_plot_interval = SAVE_PLOT_INTERVAL
def exp_a(name):
# like 132c but with smaller init values and massive pre-training of first 2 layers
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=5000,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(1),
'b': Uniform(1)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
50001: {
'remove_from': -2,
'new_layers':
[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(1),
'b': Uniform(1)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
100001: {
'remove_from': -2,
'callback': set_save_plot_interval,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 40,
# 'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
100501: {
'remove_from': -3,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 80,
# 'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=101001)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
Adai0808/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
vex1023/vxTrader | vxTrader/broker/gfTrader.py | 1 | 21984 | # encoding=utf-8
'''
GF Securities provides two trading interfaces:
1. gfTrader -- the ordinary securities trading interface
2. gfMarginTrader -- the margin trading (financing / securities lending) interface
'''
import re
import time
import uuid
from io import BytesIO
import pandas as pd
import pytesseract
import requests
from PIL import Image, ImageFilter
from vxUtils.decorator import retry
from vxTrader import logger
from vxTrader.TraderException import VerifyCodeError, TraderAPIError
from vxTrader.broker.WebTrader import LoginSession, WebTrader, BrokerFactory
from vxTrader.util import code_to_symbols
FLOAT_COLUMNS = [
'order_amount', 'order_price', 'lasttrade', 'current_amount', 'enable_amount', 'market_value',
'enable_balance', 'current_balance', 'net_balance', 'asset_balance', 'business_price', 'business_amount',
'order_amount', 'order_price', 'fund_balance']
RENAME_DICT = {
'last_price': 'lasttrade',
'entrust_no': 'order_no',
'stock_name': 'symbol_name',
'stock_code': 'symbol',
'entrust_bs': 'trade_side',
'entrust_price': 'order_price',
'entrust_amount': 'order_amount',
'entrust_status': 'order_status',
'report_time': 'order_time'
}
TIMEOUT = 600
class gfLoginSession(LoginSession):
'''
    GF Securities login session management
'''
def __init__(self, account, password):
        # Initialize the parent class
super(gfLoginSession, self).__init__(account=account, password=password)
        # TODO: read the disk serial number from the system
self.disknum = "S2ZWJ9AF517295"
self.mac_address = ("".join(c + "-" if i % 2 else c for i, c in \
enumerate(hex(uuid.getnode())[2:].zfill(12)))[:-1]).upper()
        # Regular expression used to validate the verify code
self.code_rule = re.compile("^[A-Za-z0-9]{5}$")
        # Session id used for trading requests
self._dse_sessionId = None
        # Margin trading flag
self.margin_flags = False
def pre_login(self):
'''
        Initialize the session and the headers it needs
:return:
'''
# session
gfheader = {'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
'Connection': 'Keep-Alive',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
'X-Requested-With': 'XMLHttpRequest'}
session = requests.session()
session.headers.update(gfheader)
resq = session.get('https://trade.gf.com.cn/')
resq.raise_for_status()
logger.debug('get trade home pages sucess.')
self._expire_at = 0
self._session = session
return
@property
@retry(10, VerifyCodeError)
def vcode(self):
        # Fetch the verify code image
r = self._session.get('https://trade.gf.com.cn/yzm.jpgx')
r.raise_for_status()
        # Keep the image in memory and run recognition on it
img_buffer = BytesIO(r.content)
img = Image.open(img_buffer)
if hasattr(img, "width"):
width, height = img.width, img.height
else:
width, height = img.size
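        # Pre-processing before OCR (steps below): pixels whose RGB tuple
        # compares below (100, 100, 100) are painted white, the image is
        # converted to greyscale, grey values in the 68-90 band are kept as
        # black while everything else becomes white, then min/median filters
        # smooth the result before it is handed to tesseract.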
for x in range(width):
for y in range(height):
if img.getpixel((x, y)) < (100, 100, 100):
img.putpixel((x, y), (256, 256, 256))
gray = img.convert('L')
two = gray.point(lambda x: 0 if 68 < x < 90 else 256)
min_res = two.filter(ImageFilter.MinFilter)
med_res = min_res.filter(ImageFilter.MedianFilter)
for _ in range(1):
med_res = med_res.filter(ImageFilter.MedianFilter)
        # Recognize the verify code with the tesseract-ocr tool
vcode = pytesseract.image_to_string(med_res)
img.close()
img_buffer.close()
vcode = vcode.replace(' ', '')
if self.code_rule.findall(vcode) != []:
logger.debug('vcode is: %s' % vcode)
return vcode
else:
raise VerifyCodeError('verify code error: %s' % vcode)
@retry(10, VerifyCodeError)
def login(self):
        # Recreate the session object regardless of the current login state
# self.pre_login()
login_params = {
"authtype": 2,
"disknum": self.disknum,
"loginType": 2,
"origin": "web",
'mac': self.mac_address,
'username': self._account,
'password': self._password,
'tmp_yzm': self.vcode
}
resq = self._session.post(
url='https://trade.gf.com.cn/login',
params=login_params
)
resq.raise_for_status()
logger.debug('login resq: %s' % resq.json())
data = resq.json()
if data['success'] == True:
v = resq.headers
self._dse_sessionId = v['Set-Cookie'][-32:]
            # Wait for the server to become ready
time.sleep(0.1)
logger.info('Login success: %s' % self._dse_sessionId)
return
elif data['success'] == False and 'error_info' not in data.keys():
            logger.warning('Login is currently unavailable')
raise TraderAPIError(data)
elif data['error_info'].find('验证码') != -1:
            self._dse_sessionId = None
logger.warning('VerifyCode Error: %s' % data)
raise VerifyCodeError(data['error_info'])
else:
            self._dse_sessionId = None
logger.warning('API Login Error: %s' % data)
raise TraderAPIError(data['error_info'])
def post_login(self):
if self.margin_flags == True:
margin_login_params = {
'classname': 'com.gf.etrade.control.RZRQUF2Control',
'method': 'ValidataLogin',
'dse_sessionId': self._dse_sessionId
}
r = self._session.post(
url='https://trade.gf.com.cn/entry',
params=margin_login_params)
data = r.json()
logger.debug('ensure_margin_flags: %s' % data)
trade_status = data.pop('success', False)
if trade_status == False:
logger.error(data)
error_info = data.get('error_info', data)
raise TraderAPIError(error_info)
def request(self, method, url, **kwargs):
with self:
params = kwargs.get('params', {})
params.update({'dse_sessionId': self._dse_sessionId})
kwargs['params'] = params
logger.debug('Call params: %s' % kwargs)
r = self._session.request(method=method, url=url, **kwargs)
r.raise_for_status()
logger.debug('return: %s' % r.text)
self._expire_at = time.time() + TIMEOUT
return r
def logout(self):
url = 'https://trade.gf.com.cn/entry'
params = {
'classname': 'com.gf.etrade.control.AuthenticateControl',
'method': 'logout'
}
if self._session:
self._session.get(url, params=params)
self._session = None
self._expire_at = 0
@BrokerFactory('gf', '广发证券')
class gfTrader(WebTrader):
def __init__(self, account, password, **kwargs):
super(gfTrader, self).__init__(account=account, password=password, **kwargs)
self.client = gfLoginSession(account=account, password=password)
@property
def exchange_stock_account(self):
if self._exchange_stock_account:
return self._exchange_stock_account
account_params = {
'classname': 'com.gf.etrade.control.FrameWorkControl',
'method': 'getMainJS'
}
url = 'https://trade.gf.com.cn/entry'
resq = self.client.get(url, params=account_params)
resq.raise_for_status()
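        # The response is a ';'-separated JavaScript payload; the 12th
        # statement holds the shareholder accounts as a JS array literal,
        # which is extracted with a regex and eval'd into holder dicts below.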
jslist = resq.text.split(';')
jsholder = jslist[11]
jsholder = re.findall(r'\[(.*)\]', jsholder)
jsholder = eval(jsholder[0])
self._exchange_stock_account = dict()
for holder in jsholder:
if isinstance(holder, dict):
self._exchange_stock_account[holder['exchange_type']] = holder['stock_account']
return self._exchange_stock_account
@property
def portfolio(self):
        # Query the account balance and current positions
balance = self._trade_api(
classname='com.gf.etrade.control.StockUF2Control',
method='queryAssert'
)
position = self._trade_api(
classname='com.gf.etrade.control.StockUF2Control',
method='queryCC'
)
        # Process positions
if position.shape[0] > 0:
position = position[
['symbol', 'symbol_name', 'current_amount', 'enable_amount', 'lasttrade', 'market_value']]
else:
            position = pd.DataFrame([], columns=['symbol', 'symbol_name', 'current_amount',
                                                 'enable_amount', 'lasttrade', 'market_value'])
position = position.set_index('symbol')
        # Process the cash row
asset_balance = balance['asset_balance'].iloc[0]
position.loc['cash', 'symbol_name'] = balance['money_type_dict'].iloc[0]
position.loc['cash', 'current_amount'] = balance['fund_balance'].iloc[0]
position.loc['cash', 'enable_amount'] = balance['enable_balance'].iloc[0]
position.loc['cash', 'lasttrade'] = 1.0
position.loc['cash', 'market_value'] = balance['fund_balance'].iloc[0]
        # Compute portfolio weights
position['weight'] = position['market_value'] / asset_balance
position['weight'] = position['weight'].round(4)
position = position.dropna(axis=0)
return position
def _trade_api(self, **kwargs):
url = 'https://trade.gf.com.cn/entry'
resq = self.client.post(url, params=kwargs)
if len(resq.text) == 0:
self.client.reset()
resq = self.client.post(url, params=kwargs)
data = resq.json()
logger.debug('_trade_api() return: %s' % data)
trade_status = data.pop('success', False)
if trade_status == False:
logger.error(data)
error_info = data.get('error_info', data)
raise TraderAPIError(error_info)
df = pd.DataFrame(data['data'])
df.rename(columns=RENAME_DICT, inplace=True)
if 'symbol' in df.columns:
df['symbol'] = df['symbol'].apply(code_to_symbols)
        # Only convert the float columns that are actually present, for efficiency
cols = list(set(FLOAT_COLUMNS).intersection(set(df.columns)))
for col in cols:
df[col] = pd.to_numeric(df[col], errors='ignore')
return df
@property
def orderlist(self):
orderlist = self._trade_api(
classname='com.gf.etrade.control.StockUF2Control',
method='queryDRWT',
action_in=0,
query_direction=0,
limit=50,
request_num=100
)
        # If the order list is empty, normalise its columns
if orderlist.shape[0] == 0:
orderlist = pd.DataFrame([], columns=['order_no', 'symbol', 'symbol_name', 'trade_side', 'order_price', \
'order_amount', 'business_price', 'business_amount', 'order_status',
'order_time'])
else:
orderlist['trade_side'] = orderlist['entrust_bs_dict']
orderlist['order_status'] = orderlist['entrust_status_dict']
orderlist = orderlist[['order_no', 'symbol', 'symbol_name', 'trade_side', 'order_price', \
'order_amount', 'business_price', 'business_amount', 'order_status', 'order_time']]
orderlist.dropna(axis=0, inplace=True)
orderlist.set_index('order_no', inplace=True)
return orderlist
def cancel(self, order_no):
df = self._trade_api(
entrust_no=order_no,
classname='com.gf.etrade.control.StockUF2Control',
method='cancel',
exchange_type=1,
batch_flag=0
)
return df['order_no'].iloc[0]
def buy(self, symbol, price=0, amount=0, volume=0):
symbol = symbol.lower()
if symbol[:2] not in ['sz', 'sh']:
raise ValueError('symbol(%s) is not support' % symbol)
if price == 0:
hq = self.hq(symbol)
price = hq.loc[symbol, 'ask']
if amount == 0:
if volume == 0:
raise ValueError('amount and volume both is 0' % symbol)
else:
amount = volume // price // 100 * 100
exchange_type = '1' if symbol[:2] == 'sh' else '2'
df = self._trade_api(
entrust_amount=amount,
            entrust_prop=0,  # order type
classname='com.gf.etrade.control.StockUF2Control',
method='entrust',
entrust_bs=1,
stock_account=self.exchange_stock_account[exchange_type],
exchange_type=exchange_type,
stock_code=symbol[2:],
entrust_price=price
)
return df['order_no'].iloc[0]
def sell(self, symbol, price=0, amount=0, volume=0):
symbol = symbol.lower()
if symbol[:2] not in ['sz', 'sh']:
raise ValueError('symbol(%s) is not support' % symbol)
if price == 0:
hq = self.hq(symbol)
price = hq.loc[symbol, 'bid']
if amount == 0:
if volume == 0:
raise ValueError('amount and volume both is 0' % symbol)
else:
amount = volume // price // 100 * 100
exchange_type = '1' if symbol[:2] == 'sh' else '2'
df = self._trade_api(
entrust_amount=amount,
            entrust_prop=0,  # order type
classname='com.gf.etrade.control.StockUF2Control',
method='entrust',
            entrust_bs=2,  # 1 buy, 2 sell
stock_account=self.exchange_stock_account[exchange_type],
exchange_type=exchange_type,
stock_code=symbol[2:],
entrust_price=price
)
return df['order_no'].iloc[0]
def subscribe(self, symbol, volume):
        # Convert to an exchange symbol prefixed with sz or sh
symbol = code_to_symbols(symbol[2:])
exchange_type = '1' if symbol[:2] == 'sh' else '2'
df = self._trade_api(
entrust_amount=volume,
classname='com.gf.etrade.control.StockUF2Control',
method='CNJJSS',
            entrust_bs=1,  # 1 buy, 2 sell
stock_account=self.exchange_stock_account[exchange_type],
exchange_type=exchange_type,
stock_code=symbol[2:],
entrust_price=0,
)
return df['order_no'].iloc[0]
def redemption(self, symbol, amount):
        # Convert to an exchange symbol prefixed with sz or sh
symbol = code_to_symbols(symbol[2:])
exchange_type = '1' if symbol[:2] == 'sh' else '2'
df = self._trade_api(
entrust_amount=amount,
classname='com.gf.etrade.control.StockUF2Control',
method='CNJJSS',
            entrust_bs=2,  # 1 buy, 2 sell
stock_account=self.exchange_stock_account[exchange_type],
exchange_type=exchange_type,
stock_code=symbol[2:],
entrust_price=0,
)
return df['order_no'].iloc[0]
def merge(self, symbol, amount):
        # Convert to an exchange symbol prefixed with sz or sh
symbol = code_to_symbols(symbol[2:])
exchange_type = '1' if symbol[:2] == 'sh' else '2'
df = self._trade_api(
classname='com.gf.etrade.control.SHLOFFundControl',
method='assetSecuprtTrade',
entrust_bs='',
entrust_amount=amount,
stock_account=self.exchange_stock_account[exchange_type],
exchange_type=exchange_type,
stock_code=symbol[2:],
entrust_prop='LFM',
entrust_price=1
)
return df['order_no'].iloc[0]
def split(self, symbol, amount):
        # Convert to an exchange symbol prefixed with sz or sh
symbol = code_to_symbols(symbol[2:])
exchange_type = '1' if symbol[:2] == 'sh' else '2'
df = self._trade_api(
classname='com.gf.etrade.control.SHLOFFundControl',
method='doDZJYEntrust',
entrust_bs='',
entrust_amount=amount,
stock_account=self.exchange_stock_account[exchange_type],
exchange_type=exchange_type,
stock_code=symbol[2:],
entrust_prop='LFP',
entrust_price=1
)
return df['order_no'].iloc[0]
def ipo_limit(self):
df = self._trade_api(
classname='com.gf.etrade.control.StockUF2Control',
method='querySecuSubequity',
limit=50
)
if df.shape[0] == 0:
df = pd.DataFrame([], columns=['exchange_type', 'exchange_stock_account', 'amount_limits', \
'accountno', 'init_date'])
else:
df = df[['exchange_type', 'stock_account', 'enable_amount', 'client_id', 'init_date']]
rename = {
'stock_account': 'exchange_stock_account',
'enable_amount': 'amount_limits',
'client_id': 'accountno'
}
df.rename(columns=rename, inplace=True)
df.set_index('exchange_type', inplace=True)
return df
def ipo_list(self):
df = self._trade_api(
classname='com.gf.etrade.control.StockUF2Control',
method='queryNewStkcode',
request_num=50,
query_direction=1
)
if df.shape[0] == 0:
df = pd.DataFrame([], columns=['symbol', 'symbol_name', 'exchange_type', 'subscribe_type', \
'max_buy_amount', 'buy_unit', 'money_type', 'ipo_price', \
'ipo_date', 'ipo_status'])
else:
df = df[['symbol', 'symbol_name', 'exchange_type', 'stock_type_dict', \
'high_amount', 'buy_unit', 'money_type_dict', 'lasttrade', 'issue_date', 'stkcode_status_dict']]
rename = {
'stock_type_dict': 'subscribe_type',
'high_amount': 'max_buy_amount',
'money_type_dict': 'money_type',
'lasttrade': 'ipo_price',
'issue_date': 'ipo_date',
'stkcode_status_dict': 'ipo_status'
}
df.rename(columns=rename, inplace=True)
df.set_index('symbol', inplace=True)
return df
@BrokerFactory('gfmargin', '广发证券融资融券')
class gfMarginTrader(WebTrader):
def __init__(self, account, password, **kwargs):
super(gfMarginTrader, self).__init__(account=account, password=password, **kwargs)
self.client = gfLoginSession(account=account, password=password)
def _ensure_margin_flags(self):
        '''Ensure the margin trading account has been logged in'''
if self.client.margin_flags == False:
margin_login_params = {
'classname': 'com.gf.etrade.control.RZRQUF2Control',
'method': 'ValidataLogin'
}
r = self.client.post(
url='https://trade.gf.com.cn/entry',
params=margin_login_params)
data = r.json()
logger.debug('ensure_margin_flags: %s' % data)
trade_status = data.pop('success', False)
if trade_status == False:
logger.error(data)
error_info = data.get('error_info', data)
raise TraderAPIError(error_info)
stockholders = data.get('stockholders', [])
self._exchange_stock_account = {}
for holders in stockholders:
self._exchange_stock_account[holders['exchange_type']] = holders['stock_account']
            # Mark the session as logged into the margin (credit) account
self.client.margin_flags = True
return
@property
def exchange_stock_account(self):
self._ensure_margin_flags()
return self._exchange_stock_account
def _trade_api(self, **kwargs):
        # Make sure the margin trading account has been logged in correctly
self._ensure_margin_flags()
url = 'https://trade.gf.com.cn/entry'
resq = self.client.post(url, params=kwargs)
if len(resq.text) == 0:
self.client.reset()
resq = self.client.post(url, params=kwargs)
data = resq.json()
logger.debug('_trade_api() return: %s' % data)
trade_status = data.pop('success', False)
if trade_status == False:
logger.error(data)
error_info = data.get('error_info', data)
raise TraderAPIError(error_info)
df = pd.DataFrame(data['data'])
df.rename(columns=RENAME_DICT, inplace=True)
if 'symbol' in df.columns:
df['symbol'] = df['symbol'].apply(code_to_symbols)
        # Only convert the float columns that are actually present, for efficiency
cols = list(set(FLOAT_COLUMNS).intersection(set(df.columns)))
for col in cols:
df[col] = pd.to_numeric(df[col], errors='ignore')
return df
def test(self):
print(self._trade_api(
classname='com.gf.etrade.control.RZRQUF2Control',
method='queryCC',
request_num=500,
start=0,
limit=50
))
# print(self._trade_api(
# classname='com.gf.etrade.control.RZRQUF2Control',
# method='queryAssert'
# ))
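if __name__ == '__main__':
    # Minimal usage sketch added for illustration only; 'your_account' and
    # 'your_password' are placeholders and a real GF Securities account
    # (plus a working tesseract install) is required for this to run.
    trader = gfTrader(account='your_account', password='your_password')
    print(trader.portfolio)
    print(trader.orderlist)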
| mit |
yandex-load/volta | volta/listeners/sync/sync.py | 1 | 5881 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import logging
from scipy import interpolate
from scipy import signal
from volta.common.interfaces import DataListener
pd.options.mode.chained_assignment = None
logger = logging.getLogger(__name__)
class SyncFinder(DataListener):
""" Calculates sync points for volta current measurements and phone system logs
Attributes:
        search_interval (int): number of seconds of data used for sync (searching for sync events)
sample_rate (int): volta box sample rate - depends on software and which type of volta box you use
"""
def __init__(self, config, core):
super(SyncFinder, self).__init__(config, core)
self.search_interval = config.get_option('sync', 'search_interval')
self.sample_rate = None
self.sync_df = pd.DataFrame()
self.volta_sync_stage_df = pd.DataFrame()
self.core.data_session.manager.subscribe(
self.put_syncs,
{
'type': 'events',
'source': 'phone'
}
)
self.core.data_session.manager.subscribe(
self.put_current,
{
'type': 'metrics',
'name': 'current',
'source': 'voltabox'
}
)
def put_syncs(self, incoming_df):
""" Append sync chunks to sync dataframe
"""
try:
gb = incoming_df.groupby('custom_metric_type')
except KeyError:
pass
else:
for name, df in gb:
if name == 'sync':
self.sync_df = self.sync_df.append(df)
def put_current(self, dtype, incoming_df):
""" Append currents dataframes until search interval won't will be filled up
"""
if len(self.volta_sync_stage_df) < (self.search_interval * self.sample_rate):
self.volta_sync_stage_df = self.volta_sync_stage_df.append(incoming_df)
def find_sync_points(self):
""" Cross correlation and calculate offsets
Returns:
dict: offsets for 'volta timestamp -> system log timestamp' and 'volta timestamp -> custom log timestamp'
"""
try:
logger.info('Starting sync...')
if len(self.sync_df) == 0:
raise ValueError('No sync events found!')
logger.debug('Sync df contents:\n %s', self.sync_df.describe())
self.__prepare_sync_df()
logger.debug('Sync df after preparation:\n %s', self.sync_df.describe())
logger.debug('Sync stage volta currents dataframe:\n %s', self.volta_sync_stage_df.describe())
if len(self.volta_sync_stage_df) < (self.search_interval * self.sample_rate):
raise ValueError('Not enough electrical currents for sync')
refsig = self.ref_signal(self.sync_df)
logger.debug('Refsignal len: %s, Refsignal contents:\n %s', len(refsig), refsig)
cc = self.cross_correlate(
self.volta_sync_stage_df['value'],
refsig,
(self.search_interval * self.sample_rate)
)
logger.debug('Cross correlation: %s', cc)
# [sample_offset] volta sample <-> first sync event
first_sync_offset_sample = np.argmax(cc)
logger.debug('[sample_offset] volta sample <-> first sync event: %s', first_sync_offset_sample)
# [uts_offset] volta uts <-> first sync event
sync_offset = self.volta_sync_stage_df.iloc[first_sync_offset_sample]['ts']
logger.debug('[uts_offset] volta uts <-> first sync event: %s', sync_offset)
return {
# [uts_offset] volta uts <-> phone system uts
'sys_uts_offset': int(
sync_offset - self.sync_df[self.sync_df.message > 0].iloc[0]['sys_uts']
),
# [uts_offset] volta uts <-> phone log uts
'log_uts_offset': int(
sync_offset - self.sync_df[self.sync_df.message > 0].iloc[0]["log_uts"]
),
'sync_sample': first_sync_offset_sample
}
except ValueError:
logger.debug('Failed to calculate sync pts', exc_info=True)
logger.warning('Failed to calculate sync pts')
return {}
def __prepare_sync_df(self):
""" Reset idx, drop excessive sync data, map sync events and make offset """
# map messages
self.sync_df.loc[:, ('message')] = self.sync_df.message.map({'rise': 1, 'fall': 0})
self.sync_df.reset_index(inplace=True)
# drop sync events after search interval - we don't need this
self.sync_df = self.sync_df[self.sync_df.sys_uts < self.sync_df.sys_uts[0] + (self.search_interval * 10 ** 6)]
# offset
self.sync_df.loc[:, ('sample_offset')] = self.sync_df['sys_uts'].map(
lambda x: (
(x - self.sync_df['sys_uts'][0])
) * self.sample_rate // 10**6
)
@staticmethod
def ref_signal(sync):
""" Generate square reference signal """
logger.info("Generating ref signal...")
if len(sync) == 0:
raise ValueError('Sync events not found.')
f = interpolate.interp1d(sync["sample_offset"], sync["message"], kind="zero")
X = np.linspace(0, sync["sample_offset"].values[-1], sync["sample_offset"].values[-1])
rs = f(X)
return rs - np.mean(rs)
@staticmethod
def cross_correlate(sig, ref, first=30000):
""" Calculate cross-correlation with lag. Take only first n lags """
logger.info("Calculating cross-correlation...")
return signal.fftconvolve(sig[:first], ref[::-1], mode="valid")
def close(self):
return
def get_info(self):
return
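if __name__ == '__main__':
    # Standalone sketch (not part of volta's runtime): exercise the static
    # helpers on synthetic data to show how the sync offset is recovered.
    demo_sync = pd.DataFrame({
        'sample_offset': [0, 500, 1000, 1500, 2000],
        'message': [1, 0, 1, 0, 1],
    })
    ref = SyncFinder.ref_signal(demo_sync)
    # embed the reference signal into a longer flat trace at sample 300
    sig = np.zeros(5000)
    sig[300:300 + len(ref)] = ref
    cc = SyncFinder.cross_correlate(sig, ref)
    print('recovered sample offset:', np.argmax(cc))  # expected: 300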
| mpl-2.0 |
MridulS/sympy | sympy/plotting/plot.py | 14 | 64512 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from itertools import chain
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
    # - get_meshes returning mesh_x (1D array), mesh_y (1D array),
    #   mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a complex
                #value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
                    (p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor='b', edgecolor='None' )
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", "blue"])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
vector_a = x - y
vector_b = z - y
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
    ``nb_of_points``: int. Used when ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series options are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the plot. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
    ``nb_of_points``: int. Used when ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set.union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle expressions with number of expression = 3. It is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set.union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
adamrp/qiita | qiita_db/test/test_commands.py | 1 | 28975 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os import remove, close, mkdir
from os.path import exists, join, basename
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from unittest import TestCase, main
from future.utils.six import StringIO
from future import standard_library
from functools import partial
import pandas as pd
from qiita_db.commands import (load_study_from_cmd, load_raw_data_cmd,
load_sample_template_from_cmd,
load_prep_template_from_cmd,
load_processed_data_cmd,
load_preprocessed_data_from_cmd,
load_parameters_from_cmd,
update_preprocessed_data_from_cmd)
from qiita_db.environment_manager import patch
from qiita_db.study import Study, StudyPerson
from qiita_db.user import User
from qiita_db.data import PreprocessedData
from qiita_db.util import (get_count, check_count, get_db_files_base_dir,
get_mountpoint)
from qiita_db.metadata_template import PrepTemplate
from qiita_core.util import qiita_test_checker
from qiita_ware.processing_pipeline import generate_demux_file
with standard_library.hooks():
import configparser
@qiita_test_checker()
class TestMakeStudyFromCmd(TestCase):
def setUp(self):
StudyPerson.create('SomeDude', '[email protected]', 'some',
'111 fake street', '111-121-1313')
User.create('[email protected]', 'password')
self.config1 = CONFIG_1
self.config2 = CONFIG_2
def test_make_study_from_cmd(self):
fh = StringIO(self.config1)
load_study_from_cmd('[email protected]', 'newstudy', fh)
sql = ("select study_id from qiita.study where email = %s and "
"study_title = %s")
study_id = self.conn_handler.execute_fetchone(sql, ('[email protected]',
'newstudy'))
self.assertTrue(study_id is not None)
fh2 = StringIO(self.config2)
with self.assertRaises(configparser.NoOptionError):
load_study_from_cmd('[email protected]', 'newstudy2', fh2)
@qiita_test_checker()
class TestImportPreprocessedData(TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
fd, self.file1 = mkstemp(dir=self.tmpdir)
close(fd)
fd, self.file2 = mkstemp(dir=self.tmpdir)
close(fd)
with open(self.file1, "w") as f:
f.write("\n")
with open(self.file2, "w") as f:
f.write("\n")
self.files_to_remove = [self.file1, self.file2]
self.dirs_to_remove = [self.tmpdir]
self.db_test_ppd_dir = join(get_db_files_base_dir(),
'preprocessed_data')
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
for dp in self.dirs_to_remove:
if exists(dp):
rmtree(dp)
def test_import_preprocessed_data(self):
initial_ppd_count = get_count('qiita.preprocessed_data')
initial_fp_count = get_count('qiita.filepath')
ppd = load_preprocessed_data_from_cmd(
1, 'preprocessed_sequence_illumina_params',
self.tmpdir, 'preprocessed_fasta', 1, False, 1, None)
self.files_to_remove.append(
join(self.db_test_ppd_dir,
'%d_%s' % (ppd.id, basename(self.file1))))
self.files_to_remove.append(
join(self.db_test_ppd_dir,
'%d_%s' % (ppd.id, basename(self.file2))))
self.assertEqual(ppd.id, 3)
self.assertTrue(check_count('qiita.preprocessed_data',
initial_ppd_count + 1))
self.assertTrue(check_count('qiita.filepath', initial_fp_count+2))
def test_import_preprocessed_data_data_type(self):
initial_ppd_count = get_count('qiita.preprocessed_data')
initial_fp_count = get_count('qiita.filepath')
ppd = load_preprocessed_data_from_cmd(
1, 'preprocessed_sequence_illumina_params',
self.tmpdir, 'preprocessed_fasta', 1, False, None, '16S')
self.files_to_remove.append(
join(self.db_test_ppd_dir,
'%d_%s' % (ppd.id, basename(self.file1))))
self.files_to_remove.append(
join(self.db_test_ppd_dir,
'%d_%s' % (ppd.id, basename(self.file2))))
self.assertEqual(ppd.id, 3)
self.assertTrue(check_count('qiita.preprocessed_data',
initial_ppd_count + 1))
self.assertTrue(check_count('qiita.filepath', initial_fp_count+2))
@qiita_test_checker()
class TestLoadSampleTemplateFromCmd(TestCase):
def setUp(self):
# Create a sample template file
self.st_contents = SAMPLE_TEMPLATE
# create a new study to attach the sample template
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 4,
"number_samples_promised": 4,
"study_alias": "TestStudy",
"study_description": "Description of a test study",
"study_abstract": "No abstract right now...",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
self.study = Study.create(User('[email protected]'),
"Test study", [1], info)
def test_load_sample_template_from_cmd(self):
"""Correctly adds a sample template to the DB"""
fh = StringIO(self.st_contents)
st = load_sample_template_from_cmd(fh, self.study.id)
self.assertEqual(st.id, self.study.id)
@qiita_test_checker()
class TestLoadPrepTemplateFromCmd(TestCase):
def setUp(self):
self.pt_contents = PREP_TEMPLATE
def test_load_prep_template_from_cmd(self):
"""Correctly adds a prep template to the DB"""
fh = StringIO(self.pt_contents)
st = load_prep_template_from_cmd(fh, 1, '18S')
self.assertEqual(st.id, 2)
@qiita_test_checker()
class TestLoadRawDataFromCmd(TestCase):
def setUp(self):
fd, self.forward_fp = mkstemp(suffix='_forward.fastq.gz')
close(fd)
fd, self.reverse_fp = mkstemp(suffix='_reverse.fastq.gz')
close(fd)
fd, self.barcodes_fp = mkstemp(suffix='_barcodes.fastq.gz')
close(fd)
with open(self.forward_fp, "w") as f:
f.write("\n")
with open(self.reverse_fp, "w") as f:
f.write("\n")
with open(self.barcodes_fp, "w") as f:
f.write("\n")
self.files_to_remove = []
self.files_to_remove.append(self.forward_fp)
self.files_to_remove.append(self.reverse_fp)
self.files_to_remove.append(self.barcodes_fp)
self.db_test_raw_dir = join(get_db_files_base_dir(), 'raw_data')
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
def test_load_data_from_cmd(self):
filepaths = [self.forward_fp, self.reverse_fp, self.barcodes_fp]
filepath_types = ['raw_forward_seqs', 'raw_reverse_seqs',
'raw_barcodes']
filetype = 'FASTQ'
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'ILLUMINA',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
pt1 = PrepTemplate.create(metadata, Study(1), "16S")
prep_templates = [pt1.id]
initial_raw_count = get_count('qiita.raw_data')
initial_fp_count = get_count('qiita.filepath')
initial_raw_fp_count = get_count('qiita.raw_filepath')
new = load_raw_data_cmd(filepaths, filepath_types, filetype,
prep_templates)
raw_data_id = new.id
self.files_to_remove.append(
join(self.db_test_raw_dir,
'%d_%s' % (raw_data_id, basename(self.forward_fp))))
self.files_to_remove.append(
join(self.db_test_raw_dir,
'%d_%s' % (raw_data_id, basename(self.reverse_fp))))
self.files_to_remove.append(
join(self.db_test_raw_dir,
'%d_%s' % (raw_data_id, basename(self.barcodes_fp))))
self.assertTrue(check_count('qiita.raw_data', initial_raw_count + 1))
self.assertTrue(check_count('qiita.filepath',
initial_fp_count + 3))
self.assertTrue(check_count('qiita.raw_filepath',
initial_raw_fp_count + 3))
# Ensure that the ValueError is raised when a filepath_type is not
# provided for each and every filepath
with self.assertRaises(ValueError):
load_raw_data_cmd(filepaths, filepath_types[:-1], filetype,
prep_templates)
@qiita_test_checker()
class TestLoadProcessedDataFromCmd(TestCase):
def setUp(self):
fd, self.otu_table_fp = mkstemp(suffix='_otu_table.biom')
close(fd)
fd, self.otu_table_2_fp = mkstemp(suffix='_otu_table2.biom')
close(fd)
with open(self.otu_table_fp, "w") as f:
f.write("\n")
with open(self.otu_table_2_fp, "w") as f:
f.write("\n")
self.files_to_remove = []
self.files_to_remove.append(self.otu_table_fp)
self.files_to_remove.append(self.otu_table_2_fp)
self.db_test_processed_data_dir = join(get_db_files_base_dir(),
'processed_data')
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
def test_load_processed_data_from_cmd(self):
filepaths = [self.otu_table_fp, self.otu_table_2_fp]
filepath_types = ['biom', 'biom']
initial_processed_data_count = get_count('qiita.processed_data')
initial_processed_fp_count = get_count('qiita.processed_filepath')
initial_fp_count = get_count('qiita.filepath')
new = load_processed_data_cmd(filepaths, filepath_types,
'processed_params_uclust', 1, 1, None)
processed_data_id = new.id
self.files_to_remove.append(
join(self.db_test_processed_data_dir,
'%d_%s' % (processed_data_id, basename(self.otu_table_fp))))
self.files_to_remove.append(
join(self.db_test_processed_data_dir,
'%d_%s' % (processed_data_id,
basename(self.otu_table_2_fp))))
self.assertTrue(check_count('qiita.processed_data',
initial_processed_data_count + 1))
self.assertTrue(check_count('qiita.processed_filepath',
initial_processed_fp_count + 2))
self.assertTrue(check_count('qiita.filepath',
initial_fp_count + 2))
# Ensure that the ValueError is raised when a filepath_type is not
# provided for each and every filepath
with self.assertRaises(ValueError):
load_processed_data_cmd(filepaths, filepath_types[:-1],
'processed_params_uclust', 1, 1, None)
@qiita_test_checker()
class TestLoadParametersFromCmd(TestCase):
def setUp(self):
fd, self.fp = mkstemp(suffix='_params.txt')
close(fd)
fd, self.fp_wrong = mkstemp(suffix='_params.txt')
close(fd)
with open(self.fp, 'w') as f:
f.write(PARAMETERS)
with open(self.fp_wrong, 'w') as f:
f.write(PARAMETERS_ERROR)
self.files_to_remove = [self.fp, self.fp_wrong]
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
def test_load_parameters_from_cmd_error(self):
with self.assertRaises(ValueError):
load_parameters_from_cmd("test", self.fp, "does_not_exist")
def test_load_parameters_from_cmd_error_format(self):
with self.assertRaises(ValueError):
load_parameters_from_cmd("test", self.fp_wrong,
"preprocessed_sequence_illumina_params")
def test_load_parameters_from_cmd(self):
new = load_parameters_from_cmd(
"test", self.fp, "preprocessed_sequence_illumina_params")
obs = new.to_str()
exp = ("--barcode_type hamming_8 --max_bad_run_length 3 "
"--max_barcode_errors 1.5 --min_per_read_length_fraction 0.75 "
"--phred_quality_threshold 3 --sequence_max_n 0")
self.assertEqual(obs, exp)
@qiita_test_checker()
class TestPatch(TestCase):
def setUp(self):
self.patches_dir = mkdtemp()
self.py_patches_dir = join(self.patches_dir, 'python_patches')
mkdir(self.py_patches_dir)
patch2_fp = join(self.patches_dir, '2.sql')
patch10_fp = join(self.patches_dir, '10.sql')
with open(patch2_fp, 'w') as f:
f.write("CREATE TABLE qiita.patchtest2 (testing integer);\n")
f.write("INSERT INTO qiita.patchtest2 VALUES (1);\n")
f.write("INSERT INTO qiita.patchtest2 VALUES (9);\n")
with open(patch10_fp, 'w') as f:
f.write("CREATE TABLE qiita.patchtest10 (testing integer);\n")
def tearDown(self):
rmtree(self.patches_dir)
def _check_patchtest2(self, exists=True):
if exists:
assertion_fn = self.assertTrue
else:
assertion_fn = self.assertFalse
obs = self.conn_handler.execute_fetchone(
"""SELECT EXISTS(SELECT * FROM information_schema.tables
WHERE table_name = 'patchtest2')""")[0]
assertion_fn(obs)
if exists:
exp = [[1], [9]]
obs = self.conn_handler.execute_fetchall(
"""SELECT * FROM qiita.patchtest2 ORDER BY testing""")
self.assertEqual(obs, exp)
def _check_patchtest10(self):
obs = self.conn_handler.execute_fetchone(
"""SELECT EXISTS(SELECT * FROM information_schema.tables
WHERE table_name = 'patchtest10')""")[0]
self.assertTrue(obs)
exp = []
obs = self.conn_handler.execute_fetchall(
"""SELECT * FROM qiita.patchtest10""")
self.assertEqual(obs, exp)
def _assert_current_patch(self, patch_to_check):
current_patch = self.conn_handler.execute_fetchone(
"""SELECT current_patch FROM settings""")[0]
self.assertEqual(current_patch, patch_to_check)
def test_unpatched(self):
"""Test patching from unpatched state"""
# Reset the settings table to the unpatched state
self.conn_handler.execute(
"""UPDATE settings SET current_patch = 'unpatched'""")
self._assert_current_patch('unpatched')
patch(self.patches_dir)
self._check_patchtest2()
self._check_patchtest10()
self._assert_current_patch('10.sql')
def test_skip_patch(self):
"""Test patching from a patched state"""
self.conn_handler.execute(
"""UPDATE settings SET current_patch = '2.sql'""")
self._assert_current_patch('2.sql')
# If it tried to apply patch 2.sql again, this will error
patch(self.patches_dir)
self._assert_current_patch('10.sql')
self._check_patchtest10()
# Since we "tricked" the system, patchtest2 should not exist
self._check_patchtest2(exists=False)
def test_nonexistent_patch(self):
"""Test case where current patch does not exist"""
self.conn_handler.execute(
"""UPDATE settings SET current_patch = 'nope.sql'""")
self._assert_current_patch('nope.sql')
with self.assertRaises(RuntimeError):
patch(self.patches_dir)
def test_python_patch(self):
# Write a test python patch
patch10_py_fp = join(self.py_patches_dir, '10.py')
with open(patch10_py_fp, 'w') as f:
f.write(PY_PATCH)
# Reset the settings table to the unpatched state
self.conn_handler.execute(
"""UPDATE settings SET current_patch = 'unpatched'""")
self._assert_current_patch('unpatched')
patch(self.patches_dir)
obs = self.conn_handler.execute_fetchall(
"""SELECT testing FROM qiita.patchtest10""")
exp = [[1], [100]]
self.assertEqual(obs, exp)
self._assert_current_patch('10.sql')
@qiita_test_checker()
class TestUpdatePreprocessedDataFromCmd(TestCase):
def setUp(self):
# Create a directory with the test split libraries output
self.test_slo = mkdtemp(prefix='test_slo_')
path_builder = partial(join, self.test_slo)
fna_fp = path_builder('seqs.fna')
fastq_fp = path_builder('seqs.fastq')
log_fp = path_builder('split_library_log.txt')
demux_fp = path_builder('seqs.demux')
with open(fna_fp, 'w') as f:
f.write(FASTA_SEQS)
with open(fastq_fp, 'w') as f:
f.write(FASTQ_SEQS)
with open(log_fp, 'w') as f:
f.write("Test log\n")
generate_demux_file(self.test_slo)
self._filepaths_to_remove = [fna_fp, fastq_fp, demux_fp, log_fp]
self._dirpaths_to_remove = [self.test_slo]
# Generate a directory with test split libraries output missing files
self.missing_slo = mkdtemp(prefix='test_missing_')
path_builder = partial(join, self.test_slo)
fna_fp = path_builder('seqs.fna')
fastq_fp = path_builder('seqs.fastq')
with open(fna_fp, 'w') as f:
f.write(FASTA_SEQS)
with open(fastq_fp, 'w') as f:
f.write(FASTQ_SEQS)
self._filepaths_to_remove.append(fna_fp)
self._filepaths_to_remove.append(fastq_fp)
self._dirpaths_to_remove.append(self.missing_slo)
# Create a study with no preprocessed data
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"number_samples_collected": 25,
"number_samples_promised": 28,
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"emp_person_id": StudyPerson(2),
"principal_investigator_id": StudyPerson(3),
"lab_person_id": StudyPerson(1)
}
self.no_ppd_study = Study.create(
User('[email protected]'), "Test study", [1], info)
# Get the directory where the preprocessed data is usually copied.
_, self.db_ppd_dir = get_mountpoint('preprocessed_data')[0]
def tearDown(self):
for fp in self._filepaths_to_remove:
if exists(fp):
remove(fp)
for dp in self._dirpaths_to_remove:
if exists(fp):
rmtree(dp)
def test_update_preprocessed_data_from_cmd_error_no_ppd(self):
with self.assertRaises(ValueError):
update_preprocessed_data_from_cmd(self.test_slo,
self.no_ppd_study.id)
def test_update_preprocessed_data_from_cmd_error_missing_files(self):
with self.assertRaises(IOError):
update_preprocessed_data_from_cmd(self.missing_slo, 1)
def test_update_preprocessed_data_from_cmd_error_wrong_ppd(self):
with self.assertRaises(ValueError):
update_preprocessed_data_from_cmd(self.test_slo, 1, 100)
def test_update_preprocessed_data_from_cmd(self):
exp_ppd = PreprocessedData(Study(1).preprocessed_data()[0])
exp_fps = exp_ppd.get_filepaths()
        # The original paths must exist, but they're not included in the test
# so create them here
for _, fp, _ in exp_fps:
with open(fp, 'w') as f:
f.write("")
next_fp_id = get_count('qiita.filepath') + 1
exp_fps.append(
(next_fp_id,
join(self.db_ppd_dir, "%s_split_library_log.txt" % exp_ppd.id),
'log'))
ppd = update_preprocessed_data_from_cmd(self.test_slo, 1)
# Check that the modified preprocessed data is the correct one
self.assertEqual(ppd.id, exp_ppd.id)
# Check that the filepaths returned are correct
# We need to sort the list returned from the db because the ordering
# on that list is based on db modification time, rather than id
obs_fps = sorted(ppd.get_filepaths())
self.assertEqual(obs_fps, sorted(exp_fps))
# Check that the checksums have been updated
sql = "SELECT checksum FROM qiita.filepath WHERE filepath_id=%s"
# Checksum of the fasta file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[0][0],))[0]
self.assertEqual(obs_checksum, '3532748626')
# Checksum of the fastq file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[1][0],))[0]
self.assertEqual(obs_checksum, '2958832064')
# Checksum of the demux file
# The checksum is generated dynamically, so the checksum changes
# We are going to test that the checksum is not the one that was
# before, which corresponds to an empty file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[2][0],))[0]
self.assertTrue(isinstance(obs_checksum, str))
self.assertNotEqual(obs_checksum, '852952723')
self.assertTrue(len(obs_checksum) > 0)
# Checksum of the log file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[3][0],))[0]
self.assertEqual(obs_checksum, '626839734')
def test_update_preprocessed_data_from_cmd_ppd(self):
exp_ppd = PreprocessedData(2)
next_fp_id = get_count('qiita.filepath') + 1
exp_fps = []
path_builder = partial(join, self.db_ppd_dir)
suffix_types = [("seqs.fna", "preprocessed_fasta"),
("seqs.fastq", "preprocessed_fastq"),
("seqs.demux", "preprocessed_demux"),
("split_library_log.txt", "log")]
for id_, vals in enumerate(suffix_types, start=next_fp_id):
suffix, fp_type = vals
exp_fps.append(
(id_, path_builder("%s_%s" % (exp_ppd.id, suffix)), fp_type))
ppd = update_preprocessed_data_from_cmd(self.test_slo, 1, 2)
# Check that the modified preprocessed data is the correct one
self.assertEqual(ppd.id, exp_ppd.id)
# Check that the filepaths returned are correct
# We need to sort the list returned from the db because the ordering
# on that list is based on db modification time, rather than id
obs_fps = sorted(ppd.get_filepaths())
self.assertEqual(obs_fps, exp_fps)
# Check that the checksums have been updated
sql = "SELECT checksum FROM qiita.filepath WHERE filepath_id=%s"
# Checksum of the fasta file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[0][0],))[0]
self.assertEqual(obs_checksum, '3532748626')
# Checksum of the fastq file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[1][0],))[0]
self.assertEqual(obs_checksum, '2958832064')
# Checksum of the demux file
# The checksum is generated dynamically, so the checksum changes
# We are going to test that the checksum is not the one that was
# before, which corresponds to an empty file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[2][0],))[0]
self.assertTrue(isinstance(obs_checksum, str))
self.assertNotEqual(obs_checksum, '852952723')
self.assertTrue(len(obs_checksum) > 0)
# Checksum of the log file
obs_checksum = self.conn_handler.execute_fetchone(
sql, (obs_fps[3][0],))[0]
self.assertEqual(obs_checksum, '626839734')
FASTA_SEQS = """>a_1 orig_bc=abc new_bc=abc bc_diffs=0
xyz
>b_1 orig_bc=abw new_bc=wbc bc_diffs=4
qwe
>b_2 orig_bc=abw new_bc=wbc bc_diffs=4
qwe
"""
FASTQ_SEQS = """@a_1 orig_bc=abc new_bc=abc bc_diffs=0
xyz
+
ABC
@b_1 orig_bc=abw new_bc=wbc bc_diffs=4
qwe
+
DFG
@b_2 orig_bc=abw new_bc=wbc bc_diffs=4
qwe
+
DEF
"""
CONFIG_1 = """[required]
timeseries_type_id = 1
metadata_complete = True
mixs_compliant = True
principal_investigator = SomeDude, [email protected], some
reprocess = False
study_alias = 'test study'
study_description = 'test study description'
study_abstract = 'study abstract'
efo_ids = 1,2,3,4
[optional]
number_samples_collected = 50
number_samples_promised = 25
lab_person = SomeDude, [email protected], some
funding = 'funding source'
vamps_id = vamps_id
"""
CONFIG_2 = """[required]
timeseries_type_id = 1
metadata_complete = True
principal_investigator = SomeDude, [email protected], some
reprocess = False
study_alias = 'test study'
study_description = 'test study description'
study_abstract = 'study abstract'
efo_ids = 1,2,3,4
[optional]
number_samples_collected = 50
number_samples_promised = 25
lab_person = SomeDude, [email protected], some
funding = 'funding source'
vamps_id = vamps_id
"""
SAMPLE_TEMPLATE = (
"sample_name\trequired_sample_info_status\tcollection_timestamp\t"
"sample_type\tphysical_specimen_remaining\tphysical_specimen_location\t"
"dna_extracted\thost_subject_id\tTreatment\tDOB\tlatitude\tlongitude"
"\ttaxon_id\tscientific_name\tDescription\n"
"PC.354\treceived\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\t"
"HS_ID_PC.354\tControl\t20061218\t1.88401499993\t56.0003871552\t"
"9606\thomo sapiens\tControl_mouse_I.D._354\n"
"PC.593\treceived\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\t"
"HS_ID_PC.593\tControl\t20071210\t35.4079458313\t83.2595338611\t"
"9606\thomo sapiens\tControl_mouse_I.D._593\n"
"PC.607\treceived\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\t"
"HS_ID_PC.607\tFast\t20071112\t18.3175615444\t91.3713989729\t"
"9606\thomo sapiens\tFasting_mouse_I.D._607\n"
"PC.636\treceived\t2014-06-18 16:44\ttype_1\tTrue\tLocation_1\tTrue\t"
"HS_ID_PC.636\tFast\t20080116\t31.0856060708\t4.16781143893\t"
"9606\thomo sapiens\tFasting_mouse_I.D._636")
PREP_TEMPLATE = (
'sample_name\tbarcode\tcenter_name\tcenter_project_name\t'
'description\tebi_submission_accession\temp_status\tprimer\t'
'run_prefix\tstr_column\tplatform\tlibrary_construction_protocol\t'
'experiment_design_description\n'
'SKB7.640196\tCCTCTGAGAGCT\tANL\tTest Project\tskb7\tNone\tEMP\t'
'GTGCCAGCMGCCGCGGTAA\tts_G1_L001_sequences\tValue for sample 3\tA\tB\tC\n'
'SKB8.640193\tGTCCGCAAGTTA\tANL\tTest Project\tskb8\tNone\tEMP\t'
'GTGCCAGCMGCCGCGGTAA\tts_G1_L001_sequences\tValue for sample 1\tA\tB\tC\n'
'SKD8.640184\tCGTAGAGCTCTC\tANL\tTest Project\tskd8\tNone\tEMP\t'
'GTGCCAGCMGCCGCGGTAA\tts_G1_L001_sequences\tValue for sample 2\tA\tB\tC\n')
PY_PATCH = """
from qiita_db.study import Study
from qiita_db.sql_connection import TRN
study = Study(1)
with TRN:
sql = "INSERT INTO qiita.patchtest10 (testing) VALUES (%s)"
TRN.add(sql, [[study.id], [study.id*100]], many=True)
TRN.execute()
"""
PARAMETERS = """max_bad_run_length\t3
min_per_read_length_fraction\t0.75
sequence_max_n\t0
rev_comp_barcode\tFalse
rev_comp_mapping_barcodes\tFalse
rev_comp\tFalse
phred_quality_threshold\t3
barcode_type\thamming_8
max_barcode_errors\t1.5
"""
PARAMETERS_ERROR = """max_bad_run_length\t3\tmin_per_read_length_fraction\t0.75
sequence_max_n\t0
rev_comp_barcode\tFalse
rev_comp_mapping_barcodes\tFalse
rev_comp\tFalse
phred_quality_threshold\t3
barcode_type\thamming_8
max_barcode_errors\t1.5
"""
if __name__ == "__main__":
main()
| bsd-3-clause |
loli/sklearn-ensembletrees | sklearn/svm/setup.py | 2 | 3246 | import os
from os.path import join
import numpy
import warnings
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/backends/tkagg.py | 8 | 1297 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import tkinter as Tk
import numpy as np
from matplotlib.backends import _tkagg
def blit(photoimage, aggimage, bbox=None, colormode=1):
tk = photoimage.tk
if bbox is not None:
bbox_array = bbox.__array__()
else:
bbox_array = None
data = np.asarray(aggimage)
try:
tk.call(
"PyAggImagePhoto", photoimage,
id(data), colormode, id(bbox_array))
except Tk.TclError:
try:
try:
_tkagg.tkinit(tk.interpaddr(), 1)
except AttributeError:
_tkagg.tkinit(id(tk), 0)
tk.call("PyAggImagePhoto", photoimage,
id(data), colormode, id(bbox_array))
except (ImportError, AttributeError, Tk.TclError):
raise
def test(aggimage):
import time
r = Tk.Tk()
c = Tk.Canvas(r, width=aggimage.width, height=aggimage.height)
c.pack()
p = Tk.PhotoImage(width=aggimage.width, height=aggimage.height)
blit(p, aggimage)
c.create_image(aggimage.width,aggimage.height,image=p)
blit(p, aggimage)
while 1: r.update_idletasks()
| mit |
jrbourbeau/cr-composition | comptools/io.py | 1 | 14909 |
from __future__ import print_function, division
import os
import glob
import numpy as np
import pandas as pd
from dask.diagnostics import ProgressBar
import dask.dataframe as dd
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from .base import get_config_paths
from .simfunctions import get_sim_configs
from .datafunctions import get_data_configs
def validate_dataframe(df):
if not isinstance(df, pd.DataFrame):
raise TypeError('Expecting a DataFrame, but got {}'.format(type(df)))
def validate_datatype(datatype):
assert datatype in ['sim', 'data'], 'datatype must be either \'sim\' or \'data\''
def apply_quality_cuts(df, datatype='sim', return_cut_dict=False, verbose=True):
validate_dataframe(df)
validate_datatype(datatype)
# Quality Cuts #
# Adapted from PHYSICAL REVIEW D 88, 042004 (2013)
cut_dict = {}
# IT specific cuts
cut_dict['passed_IceTopQualityCuts'] = df['passed_IceTopQualityCuts'].astype(bool)
cut_dict['NStations'] = df['NStations'] >= 5
# InIce specific cuts
cut_dict['eloss_positive'] = df['eloss_1500_standard'] > 0
cut_dict['passed_InIceQualityCuts'] = df['passed_InIceQualityCuts'].astype(bool) & cut_dict['eloss_positive']
for i in ['1_60']:
cut_dict['NChannels_' + i] = df['NChannels_' + i] >= 8
cut_dict['max_qfrac_' + i] = df['max_qfrac_' + i] < 0.3
cut_dict['FractionContainment_Laputop_InIce'] = df['FractionContainment_Laputop_InIce'] < 1.0
for i in ['1_60']:
cut_dict['num_hits_'+i] = cut_dict['NChannels_'+i] & cut_dict['NStations']
if return_cut_dict:
print('Returning without applying quality cuts')
return df, cut_dict
else:
selection_mask = np.ones(len(df), dtype=bool)
standard_cut_keys = ['passed_IceTopQualityCuts',
'passed_InIceQualityCuts',
'FractionContainment_Laputop_InIce',
'num_hits_1_60',
]
for key in standard_cut_keys:
selection_mask *= cut_dict[key]
# Print cut event flow
if verbose:
n_total = len(df)
print('Starting out with {} {} events'.format(n_total, datatype))
cumulative_cut_mask = np.array([True] * n_total)
print('{} quality cut event flow:'.format(datatype))
for key in standard_cut_keys:
cumulative_cut_mask *= cut_dict[key]
print('{:>30}: {:>5.3} {:>5.3}'.format(key, np.sum(
cut_dict[key]) / n_total, np.sum(cumulative_cut_mask) / n_total))
print('\n')
# df_cut = df[selection_mask]
df_cut = df.loc[selection_mask, :].reset_index(drop=True)
return df_cut
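# Minimal usage sketch (the column values are made-up assumptions for
# illustration only, not output of the processing pipeline): build a tiny
# DataFrame with the columns the cuts need and keep only rows passing the
# standard selection. Defined for documentation only and never called here.
def _apply_quality_cuts_example():  # pragma: no cover - documentation aid only
    toy = pd.DataFrame({
        'passed_IceTopQualityCuts': [True, True],
        'passed_InIceQualityCuts': [True, False],
        'NStations': [6, 4],
        'eloss_1500_standard': [10.0, -1.0],
        'NChannels_1_60': [12, 5],
        'max_qfrac_1_60': [0.1, 0.5],
        'FractionContainment_Laputop_InIce': [0.8, 1.2],
    })
    # Only the first row survives all of the standard cut keys.
    return apply_quality_cuts(toy, datatype='sim', verbose=False)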
def add_convenience_variables(df, datatype='sim'):
validate_dataframe(df)
if datatype == 'sim':
df['MC_log_energy'] = np.nan_to_num(np.log10(df['MC_energy']))
# Add log-scale columns to df
df['lap_log_energy'] = np.nan_to_num(np.log10(df['lap_energy']))
# df['InIce_log_charge_1_60'] = np.nan_to_num(np.log10(df['InIce_charge_1_60']))
for i in ['1_60']:
# df['InIce_log_charge_'+i] = np.nan_to_num(np.log10(df['InIce_charge_'+i]))
df['InIce_log_charge_'+i] = np.log10(df['InIce_charge_'+i])
df['log_NChannels_'+i] = np.log10(df['NChannels_'+i])
df['log_NHits_'+i] = np.log10(df['NHits_'+i])
df['lap_cos_zenith'] = np.cos(df['lap_zenith'])
for dist in ['50', '80', '125', '180', '250', '500']:
df['log_s'+dist] = np.log10(df['lap_s'+dist])
df['log_dEdX'] = np.log10(df['eloss_1500_standard'])
df['log_d4r_peak_energy'] = np.log10(df['d4r_peak_energy'])
df['log_d4r_peak_sigma'] = np.log10(df['d4r_peak_sigma'])
# df['log_IceTop_charge'] = np.log10(df['IceTop_charge'])
# df['log_IceTop_charge_175m'] = np.log10(df['IceTop_charge_175m'])
# df['IT_charge_ratio'] = df['IceTop_charge_175m']/df['IceTop_charge']
# df['charge_ratio'] = df['InIce_charge_1_60']/df['IceTop_charge']
# Add ratio of features (could help improve RF classification)
# df['charge_nchannels_ratio'] = df['InIce_charge_1_30'] / df['NChannels_1_30']
# df['charge_nhits_ratio'] = df['InIce_charge_1_30'] / df['NHits_1_30']
# df['nhits_nchannels_ratio'] = df['NHits_1_30'] / df['NChannels_1_30']
# df['stationdensity_charge_ratio'] = df[
# 'StationDensity'] / df['InIce_charge_1_30']
# df['stationdensity_nchannels_ratio'] = df[
# 'StationDensity'] / df['NChannels_1_30']
# df['stationdensity_nhits_ratio'] = df['StationDensity'] / df['NHits_1_30']
return df
def _load_basic_dataframe(df_file=None, datatype='sim', config='IC86.2012',
energy_reco=True, energy_cut_key='reco_log_energy',
log_energy_min=None, log_energy_max=None,
columns=None, n_jobs=1, verbose=False,
compute=True):
validate_datatype(datatype)
if df_file is not None:
files = df_file
else:
paths = get_config_paths()
file_pattern = os.path.join(paths.comp_data_dir,
config,
datatype,
'processed_hdf',
'nominal' if datatype == 'sim' else '',
'*.hdf')
files = sorted(glob.glob(file_pattern))
ddf = dd.read_hdf(files,
key='dataframe',
mode='r',
columns=columns,
chunksize=10000)
# Energy reconstruction
if energy_reco:
model_dict = load_trained_model('linearregression_energy_{}'.format(config),
return_metadata=True)
pipeline = model_dict['pipeline']
feature_list = list(model_dict['training_features'])
def add_reco_energy(partition):
partition['reco_log_energy'] = pipeline.predict(partition[feature_list])
partition['reco_energy'] = 10**partition['reco_log_energy']
return partition
ddf = ddf.map_partitions(add_reco_energy)
# Energy range cut
if log_energy_min is not None and log_energy_max is not None:
def apply_energy_cut(partition):
energy_mask = (partition[energy_cut_key] > log_energy_min) & (partition[energy_cut_key] < log_energy_max)
return partition.loc[energy_mask, :]
ddf = ddf.map_partitions(apply_energy_cut)
if compute:
if verbose:
pbar = ProgressBar()
pbar.register()
scheduler = 'processes' if n_jobs > 1 else 'synchronous'
df = ddf.compute(scheduler=scheduler, num_workers=n_jobs)
df = df.reset_index(drop=True)
else:
df = ddf
return df
_load_parameters_docstring = """Parameters
----------
df_file : path, optional
If specified, the given path to a pandas.DataFrame will be loaded
(default is None, so the file path will be determined from the
datatype and config).
config : str, optional
Detector configuration (default is 'IC86.2012').
test_size : int, float, optional
    Fraction or number of events to be split off into a separate testing
    set (default is 0.5). test_size is passed to
    sklearn.model_selection.train_test_split.
energy_reco : bool, optional
Option to perform energy reconstruction for each event
(default is True).
energy_cut_key : str, optional
    Energy key to apply energy range cuts to (default is 'reco_log_energy').
log_energy_min : int, float, optional
Option to set a lower limit on the reconstructed log energy in GeV
(default is 6.0).
log_energy_max : int, float, optional
Option to set a upper limit on the reconstructed log energy in GeV
(default is 8.0).
columns : array_like, optional
Option to specify the columns that should be in the returned
DataFrame(s) (default is None, all columns are returned).
n_jobs : int, optional
Number of chunks to load in parallel (default is 1).
verbose : bool, optional
    Option for verbose progress bar output (default is False)."""
def load_sim(df_file=None, config='IC86.2012', test_size=0.5,
energy_reco=True, energy_cut_key='reco_log_energy',
log_energy_min=6.0, log_energy_max=8.0, columns=None, n_jobs=1,
verbose=False, compute=True):
if config not in get_sim_configs():
raise ValueError('config must be in {}'.format(get_sim_configs()))
if not isinstance(test_size, (int, float)):
raise TypeError('test_size must be a floating-point number')
df = _load_basic_dataframe(df_file=df_file,
datatype='sim',
config=config,
energy_reco=energy_reco,
energy_cut_key=energy_cut_key,
columns=columns,
log_energy_min=log_energy_min,
log_energy_max=log_energy_max,
n_jobs=n_jobs,
verbose=verbose,
compute=compute)
# If specified, split into training and testing DataFrames
if test_size > 0:
output = train_test_split(df, test_size=test_size, shuffle=True,
random_state=2)
else:
output = df
return output
load_sim.__doc__ = """ Function to load processed simulation DataFrame
{_load_parameters_docstring}
Returns
-------
pandas.DataFrame, tuple of pandas.DataFrame
Return a single DataFrame if test_size is 0, otherwise return
a 2-tuple of training and testing DataFrame.
""".format(_load_parameters_docstring=_load_parameters_docstring)
def load_data(df_file=None, config='IC86.2012', energy_reco=True,
energy_cut_key='reco_log_energy', log_energy_min=6.0,
log_energy_max=8.0, columns=None, n_jobs=1, verbose=False,
compute=True, processed=True):
if config not in get_data_configs():
raise ValueError('config must be in {}'.format(get_data_configs()))
if processed:
# Load processed dataset with quality cuts already applied
paths = get_config_paths()
data_file = os.path.join(paths.comp_data_dir,
config,
'data',
'data_dataframe_quality_cuts.hdf'
)
ddf = dd.read_hdf(data_file,
key='dataframe',
mode='r',
columns=columns,
chunksize=100000)
scheduler = 'synchronous'
if verbose:
with ProgressBar():
df = ddf.compute(scheduler=scheduler, num_workers=n_jobs)
else:
df = ddf.compute(scheduler=scheduler, num_workers=n_jobs)
else:
print('FYI: Loading non-processed dataset. This takes longer than '
'loading the processed dataset...')
df = _load_basic_dataframe(df_file=df_file,
datatype='data',
config=config,
energy_reco=energy_reco,
energy_cut_key=energy_cut_key,
columns=columns,
log_energy_min=log_energy_min,
log_energy_max=log_energy_max,
n_jobs=n_jobs,
verbose=verbose,
compute=compute)
return df
load_data.__doc__ = """ Function to load processed data DataFrame
{_load_parameters_docstring}
processed : bool, optional
Whether to load processed (quality + energy cuts applied) or
pre-processed data (default is True).
Returns
-------
pandas.DataFrame
Return a DataFrame with processed data
""".format(_load_parameters_docstring=_load_parameters_docstring)
def load_tank_charges(config='IC79.2010', datatype='sim', return_dask=False):
paths = get_config_paths()
file_pattern = os.path.join(paths.comp_data_dir,
'{}_{}'.format(config, datatype),
'dataframe_files',
'dataframe_*.hdf5')
tank_charges = dd.read_hdf(file_pattern, 'tank_charges')
if return_dask:
return tank_charges
else:
return tank_charges.compute()
def dataframe_to_array(df, columns, drop_null=True):
validate_dataframe(df)
if not isinstance(columns, (list, tuple, np.ndarray, str)):
raise ValueError('columns must be a string or array-like')
# Select desired columns from DataFrame
df = df.loc[:, columns]
# If specified, drop rows that contain a null value
if drop_null:
df = df.replace([np.inf, -np.inf], np.nan).dropna(axis=0, how='any')
array = df.values
return array
def dataframe_to_X_y(df, feature_list, target='comp_target_2', drop_null=True):
validate_dataframe(df)
X = dataframe_to_array(df, feature_list, drop_null=drop_null)
y = dataframe_to_array(df, target, drop_null=drop_null)
return X, y
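# Illustrative sketch only (the toy feature names and target values are
# assumptions, not columns produced by the processing pipeline). Defined for
# documentation only and never called here.
def _dataframe_to_X_y_example():  # pragma: no cover - documentation aid only
    toy = pd.DataFrame({'feature_a': [1.0, 2.0, 3.0],
                        'feature_b': [0.5, 0.25, 0.125],
                        'comp_target_2': [0, 1, 0]})
    X, y = dataframe_to_X_y(toy, ['feature_a', 'feature_b'],
                            target='comp_target_2')
    # X has shape (3, 2) and y has shape (3,)
    return X, y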
def load_trained_model(pipeline_str='BDT', config='IC86.2012',
return_metadata=False):
""" Function to load pre-trained model to avoid re-training
Parameters
----------
pipeline_str : str, optional
Name of model to load (default is 'BDT').
config : str, optional
Detector configuration (default is 'IC86.2012').
return_metadata : bool, optional
Option to return metadata associated with saved model (e.g. list of
training features used, scikit-learn version, etc) (default is False).
Returns
-------
pipeline : sklearn.Pipeline
Trained scikit-learn pipeline.
model_dict : dict
Dictionary containing trained model as well as relevant metadata.
"""
paths = get_config_paths()
model_file = os.path.join(paths.comp_data_dir,
config,
'models',
'{}.pkl'.format(pipeline_str))
if not os.path.exists(model_file):
raise IOError('There is no saved model file {}'.format(model_file))
model_dict = joblib.load(model_file)
if return_metadata:
return model_dict
else:
return model_dict['pipeline']
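# Hedged usage sketch: assumes a previously trained model was serialized to
# <comp_data_dir>/IC86.2012/models/BDT.pkl by the training scripts (a local
# setup assumption). Defined for documentation only and never called here.
def _load_trained_model_example():  # pragma: no cover - documentation aid only
    model_dict = load_trained_model(pipeline_str='BDT', config='IC86.2012',
                                    return_metadata=True)
    pipeline = model_dict['pipeline']
    features = list(model_dict['training_features'])
    return pipeline, features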
| mit |
mlindauer/AutoFolio | autofolio/pre_solving/aspeed_schedule.py | 1 | 6037 | import os
import sys
import logging
import math
import numpy as np
import pandas as pd
import subprocess
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
UniformFloatHyperparameter, UniformIntegerHyperparameter
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace import Configuration
from aslib_scenario.aslib_scenario import ASlibScenario
__author__ = "Marius Lindauer"
__license__ = "BSD"
class Aspeed(object):
@staticmethod
def add_params(cs: ConfigurationSpace, cutoff: int):
'''
adds parameters to ConfigurationSpace
Arguments
---------
cs: ConfigurationSpace
configuration space to add new parameters and conditions
cutoff: int
maximal possible time for aspeed
'''
pre_solving = CategoricalHyperparameter(
"presolving", choices=[True, False], default_value=False)
cs.add_hyperparameter(pre_solving)
pre_cutoff = UniformIntegerHyperparameter(
"pre:cutoff", lower=1, upper=cutoff, default_value=math.ceil(cutoff * 0.1), log=True)
cs.add_hyperparameter(pre_cutoff)
cond = InCondition(child=pre_cutoff, parent=pre_solving, values=[True])
cs.add_condition(cond)
def __init__(self, clingo: str=None, runsolver: str=None, enc_fn: str=None):
'''
Constructor
Arguments
---------
clingo: str
path to clingo binary
runsolver: str
path to runsolver binary
enc_fn: str
path to encoding file name
'''
self.logger = logging.getLogger("Aspeed")
if not runsolver:
self.runsolver = os.path.join(
os.path.dirname(sys.argv[0]), "..", "aspeed", "runsolver")
else:
self.runsolver = runsolver
if not clingo:
self.clingo = os.path.join(
os.path.dirname(sys.argv[0]), "..", "aspeed", "clingo")
else:
self.clingo = clingo
if not enc_fn:
self.enc_fn = os.path.join(
os.path.dirname(sys.argv[0]), "..", "aspeed", "enc1.lp")
else:
self.enc_fn = enc_fn
self.mem_limit = 2000 # mb
self.cutoff = 60
self.data_threshold = 300 # minimal number of instances to use
self.data_fraction = 0.3 # fraction of instances to use
self.schedule = []
def fit(self, scenario: ASlibScenario, config: Configuration):
'''
fit pca object to ASlib scenario data
Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
ASlib Scenario with all data in pandas
config: ConfigSpace.Configuration
configuration
classifier_class: selector.classifier.*
class for classification
'''
if config["presolving"]:
self.logger.info("Compute Presolving Schedule with Aspeed")
X = scenario.performance_data.values
# if the instance set is too large, we subsample it
if X.shape[0] > self.data_threshold:
random_indx = np.random.choice(
range(X.shape[0]),
size=min(X.shape[0], max(int(X.shape[0] * self.data_fraction), self.data_threshold)),
replace=True)
X = X[random_indx, :]
self.logger.debug("#Instances for pre-solving schedule: %d" %(X.shape[0]))
times = ["time(i%d, %d, %d)." % (i, j, max(1,math.ceil(X[i, j])))
for i in range(X.shape[0]) for j in range(X.shape[1])]
kappa = "kappa(%d)." % (config["pre:cutoff"])
data_in = " ".join(times) + " " + kappa
# call aspeed and save schedule
self._call_clingo(data_in=data_in, algorithms=scenario.performance_data.columns)
def _call_clingo(self, data_in: str, algorithms: list):
'''
call clingo on self.enc_fn and facts from data_in
Arguments
---------
data_in: str
facts in format time(I,A,T) and kappa(C)
algorithms: list
list of algorithm names
'''
cmd = "%s -C %d -M %d -w /dev/null %s %s -" % (
self.runsolver, self.cutoff, self.mem_limit, self.clingo, self.enc_fn)
self.logger.info("Call: %s" % (cmd))
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
stdout, stderr = p.communicate(input=data_in)
self.logger.debug(stdout)
schedule_dict = {}
for line in stdout.split("\n"):
if line.startswith("slice"):
schedule_dict = {} # reinitizalize for every found schedule
slices_str = line.split(" ")
for slice in slices_str:
s_tuple = slice.replace("slice(", "").rstrip(")").split(",")
algo = algorithms[int(s_tuple[1])]
budget = int(s_tuple[2])
schedule_dict[algo] = budget
self.schedule = sorted(schedule_dict.items(), key=lambda x: x[1])
self.logger.info("Fitted Schedule: %s" % (self.schedule))
def predict(self, scenario: ASlibScenario):
'''
transform ASLib scenario data
Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
ASlib Scenario with all data in pandas
Returns
-------
schedule:{inst -> (solver, time)}
schedule of solvers with a running time budget
'''
return dict((inst, self.schedule) for inst in scenario.instances)
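# Illustrative sketch (not part of AutoFolio): the ASP facts handed to clingo in
# `_call_clingo` follow the format built in `fit` above, i.e. time(instance, algorithm,
# runtime) facts plus one kappa(cutoff) fact. The numbers below are made up.
if __name__ == '__main__':
    X = np.array([[3.2, 7.9], [1.1, 0.4], [12.0, 2.5]])  # 3 instances x 2 algorithms
    times = ["time(i%d, %d, %d)." % (i, j, max(1, math.ceil(X[i, j])))
             for i in range(X.shape[0]) for j in range(X.shape[1])]
    kappa = "kappa(%d)." % 10  # pre-solving cutoff of 10 seconds
    print(" ".join(times) + " " + kappa)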
| bsd-2-clause |
LohithBlaze/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running a SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
sahat/bokeh | examples/app/applet/example.py | 3 | 7687 | import bokeh.server
from bokeh.plotting import line, circle, curdoc
from bokeh.widgetobjects import (VBoxModelForm, HBox,
BokehApplet, TextInput, PreText,
Select, Slider)
from bokeh.objects import Plot, ColumnDataSource
from bokeh.plot_object import PlotObject
from bokeh.properties import (Dict, Float, String, Instance)
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class MyModel(VBoxModelForm):
"""Input Widgets, define the fields you want to
read from the input here as bokeh properties
input_specs is a list of dictionary, specifying
how the kind of input widget you want for each
property. the name field must match
one of the properties, for example here,
we use names of offset and scale. You can
also specify title, if you want a different
label in the generated form
"""
offset = Float(1.0)
scale = Float(1.0)
title = String(default="my sin wave")
input_specs = [
{"widget" : TextInput,
"name" : "title",
"value" : "my sin wave"},
{"widget" : Slider,
"name" : "offset",
"value" : 1.0,
"start" : 0.0,
"end" : 5.0},
{"widget" : Slider,
"name" : "scale",
"value" : 1.0,
"start" : -5.0,
"end" : 5.0},
]
class MyApp(BokehApplet):
plot = Instance(Plot)
source = Instance(ColumnDataSource)
def create(self, doc):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
self.modelform = MyModel()
self.modelform.create_inputs(doc)
self.source = ColumnDataSource(data={'x':[], 'y':[]})
self.update_data()
self.plot = line('x', 'y', source=self.source,
plot_width=400, plot_height=400,
title=self.modelform.title
)
self.children.append(self.modelform)
self.children.append(self.plot)
def input_change(self, obj, attrname, old, new):
"""
This function is called whenever the input form changes
This is responsible for updating the plot, or whatever
you want. The signature is
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
self.update_data()
self.plot.title = self.modelform.title
def update_data(self):
N = 80
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
logging.debug ("PARAMS %s %s", self.modelform.offset, self.modelform.scale)
y = self.modelform.offset + y * self.modelform.scale
self.source.data = {'x' : x, 'y' : y}
# the following adds "/exampleapp" as a URL which renders MyApp
bokeh_url = "http://localhost:5006"
MyApp.add_route("/exampleapp", bokeh_url)
"""
Example 2
you need to run download.py to get the data from quantquote
"""
import os
from os.path import join, dirname, splitext
import pandas as pd
data_dir = join(dirname(__file__), "daily")
tickers = os.listdir(data_dir)
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
class StockInputModel(VBoxModelForm):
"""Input Widgets, define the fields you want to
read from the input here as bokeh properties
input_specs is a list of dictionary, specifying
how the kind of input widget you want for each
property. the name field must match
one of the properties, for example here,
we use names of offset and scale. You can
also specify title, if you want a different
label in the generated form
"""
ticker1 = String(default="AAPL")
ticker2 = String(default="GOOG")
input_specs = [
{"widget" : Select,
"name" : "ticker1",
"value" : "AAPL",
"options" : ["AAPL","GOOG","INTC","BRCM","YHOO"]
},
{"widget" : Select,
"name" : "ticker2",
"value" : "GOOG",
"options" : ["AAPL","GOOG","INTC","BRCM","YHOO"]
}
]
class StockApp(BokehApplet):
plot = Instance(Plot)
source = Instance(ColumnDataSource)
pretext = Instance(PreText)
def get_data(self, ticker1, ticker2):
fname = join(data_dir, "table_%s.csv" % ticker1.lower())
data1 = pd.read_csv(fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date'])
data1 = data1.set_index('date')
fname = join(data_dir, "table_%s.csv" % ticker2.lower())
data2 = pd.read_csv(fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date'])
data2 = data2.set_index('date')
data = pd.DataFrame({ticker1 : data1.c, ticker2 : data2.c})
data[ticker1 + "_returns"] = data[ticker1].diff()
data[ticker2 + "_returns"] = data[ticker2].diff()
data = data.dropna()
return data
def create(self, doc):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
self.modelform = StockInputModel()
self.modelform.create_inputs(doc)
ticker1 = self.modelform.ticker1
ticker2 = self.modelform.ticker2
self.pretext = PreText(text="")
self.make_source(ticker1, ticker2)
self.make_plots(ticker1, ticker2)
self.make_stats()
self.set_children()
def make_source(self, ticker1, ticker2):
df = self.get_data(ticker1, ticker2)
self.source = ColumnDataSource(data=df)
def make_plots(self, ticker1, ticker2):
self.plot = circle(ticker1 + "_returns", ticker2 + "_returns",
title="%s vs %s" %(ticker1, ticker2),
source=self.source,
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,select"
)
def set_children(self):
self.children = [self.modelform, self.plot, self.pretext]
curdoc()._plotcontext.children = [self]
curdoc().add_all()
def input_change(self, obj, attrname, old, new):
"""
This function is called whenever the input form changes
This is responsible for updating the plot, or whatever
you want. The signature is
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
if attrname in ("ticker1", "ticker2"):
ticker1 = self.modelform.ticker1
ticker2 = self.modelform.ticker2
self.make_source(ticker1, ticker2)
self.make_plots(ticker1, ticker2)
self.set_children()
def setup_events(self):
super(StockApp, self).setup_events()
if self.source:
self.source.on_change('selected', self, 'selection_change')
def make_stats(self):
pandas_df = pd.DataFrame(self.source.data)
selected = self.source.selected
if selected:
pandas_df = pandas_df.iloc[selected, :]
stats = pandas_df.describe()
self.pretext.text = str(stats)
def selection_change(self, obj, attrname, old, new):
self.make_stats()
# the following adds "/stocks" as a URL which renders StockApp
bokeh_url = "http://localhost:5006"
StockApp.add_route("/stocks", bokeh_url)
if __name__ == "__main__":
bokeh.server.run()
| bsd-3-clause |
memo/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
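# Minimal usage sketch (added for illustration, not part of the TensorFlow sources):
# extract a float feature matrix and label values from small DataFrames. The block
# only runs when pandas imported successfully above; data values are made up.
if __name__ == '__main__':
  if HAS_PANDAS:
    features = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, 1.5, 2.5]})
    labels = pd.DataFrame({'target': [0, 1, 0]})
    print(extract_pandas_data(features))   # float ndarray with one row per sample
    print(extract_pandas_labels(labels))   # underlying label values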
| apache-2.0 |
r-mart/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest indexes
have a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
bikong2/scikit-learn | sklearn/neural_network/rbm.py | 206 | 12292 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
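# Usage sketch (added for illustration, not part of scikit-learn). Assuming the class
# is imported as ``from sklearn.neural_network import BernoulliRBM``, fitting on toy
# binary data and then sampling/scoring looks like this:
#
#     rng = np.random.RandomState(0)
#     X_demo = (rng.random_sample((6, 4)) > 0.5).astype(np.float64)
#     rbm = BernoulliRBM(n_components=2, n_iter=5, random_state=0)
#     rbm.fit(X_demo)
#     rbm.gibbs(X_demo[:2])        # one Gibbs sampling step from two visible vectors
#     rbm.score_samples(X_demo)    # pseudo-likelihood proxy, one value per sample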
| bsd-3-clause |
jrleeman/MetPy | examples/plots/Simple_Sounding.py | 3 | 3067 | # Copyright (c) 2015,2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Simple Sounding
===============
Use MetPy as straightforward as possible to make a Skew-T LogP plot.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, SkewT
from metpy.units import units
###########################################
# Change default to be better for skew-T
plt.rcParams['figure.figsize'] = (9, 9)
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
###########################################
skew = SkewT()
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.ax.set_ylim(1000, 100)
# Add the MetPy logo!
fig = plt.gcf()
add_metpy_logo(fig, 115, 100)
###########################################
# Example of defining your own vertical barb spacing
skew = SkewT()
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
# Set spacing interval--Every 50 mb from 1000 to 100 mb
my_interval = np.arange(100, 1000, 50) * units('mbar')
# Get indexes of values closest to defined interval
ix = mpcalc.resample_nn_1d(p, my_interval)
# Plot only values nearest to defined interval values
skew.plot_barbs(p[ix], u[ix], v[ix])
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.ax.set_ylim(1000, 100)
# Add the MetPy logo!
fig = plt.gcf()
add_metpy_logo(fig, 115, 100)
# Show the plot
plt.show()
| bsd-3-clause |
NSLS-II-SRX/ipython_ophyd | profile_xf05id1/startup/91-wirescan.py | 1 | 15049 | # -*- coding: utf-8 -*-
"""
set up for wire scan for HF mode
"""
import bluesky.plans as bp
from bluesky.callbacks import LiveRaster
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import time
import epics
import os
import numpy
#matplotlib.pyplot.ticklabel_format(style='plain')
def get_stock_md():
md = {}
md['beamline_status'] = {'energy': energy.energy.position
#'slt_wb': str(slt_wb.position),
#'slt_ssa': str(slt_ssa.position)
}
md['initial_sample_position'] = {'hf_stage_x': hf_stage.x.position,
'hf_stage_y': hf_stage.y.position,
'hf_stage_z': hf_stage.z.position}
md['wb_slits'] = {'v_gap' : slt_wb.v_gap.position,
'h_gap' : slt_wb.h_gap.position,
'v_cen' : slt_wb.v_cen.position,
'h_cen' : slt_wb.h_cen.position
}
md['hfm'] = {'y' : hfm.y.position,
'bend' : hfm.bend.position}
md['ssa_slits'] = {'v_gap' : slt_ssa.v_gap.position,
'h_gap' : slt_ssa.h_gap.position,
'v_cen' : slt_ssa.v_cen.position,
'h_cen' : slt_ssa.h_cen.position
}
return md
def get_stock_md_xfm():
md = {}
md['beamline_status'] = {'energy': energy.energy.position
#'slt_wb': str(slt_wb.position),
#'slt_ssa': str(slt_ssa.position)
}
md['initial_sample_position'] = {'stage27a_x': stage.x.position,
'stage27a_y': stage.y.position,
'stage27a_z': stage.z.position}
md['wb_slits'] = {'v_gap' : slt_wb.v_gap.position,
'h_gap' : slt_wb.h_gap.position,
'v_cen' : slt_wb.v_cen.position,
'h_cen' : slt_wb.h_cen.position
}
md['hfm'] = {'y' : hfm.y.position,
'bend' : hfm.bend.position}
md['ssa_slits'] = {'v_gap' : slt_ssa.v_gap.position,
'h_gap' : slt_ssa.h_gap.position,
'v_cen' : slt_ssa.v_cen.position,
'h_cen' : slt_ssa.h_cen.position
}
return md
def hf2dwire(*, xstart, xnumstep, xstepsize,
zstart, znumstep, zstepsize,
acqtime, numrois=1, i0map_show=True, itmap_show=False,
energy=None, u_detune=None):
'''
input:
xstart, xnumstep, xstepsize (float)
zstart, znumstep, zstepsize (float)
        acqtime (float): acquisition time to be set for both xspress3 and F460
        numrois (integer): number of ROIs set to display in the live raster scans. This is for display ONLY.
                           The actual number of ROIs saved depends on how many are enabled and set in the read_attr.
                           However, normally one cares only about the raw XRF spectra, which are all saved and will be used for fitting.
        i0map_show (boolean): When set to True, a map of i0 will be displayed in the live raster, default is True
        itmap_show (boolean): When set to True, a map of the transmission diode will be displayed in the live raster, default is True
energy (float): set energy, use with caution, hdcm might become misaligned
u_detune (float): amount of undulator to detune in the unit of keV
'''
#record relevant meta data in the Start document, defined in 90-usersetup.py
md = get_stock_md()
#setup the detector
# TODO do this with configure
current_preamp.exp_time.put(acqtime-0.09)
xs.settings.acquire_time.put(acqtime)
xs.total_points.put((xnumstep+1)*(znumstep+1))
# det = [current_preamp, xs]
det = [xs]
#setup the live callbacks
livecallbacks = []
livetableitem = [hf_stage.x, hf_stage.z, 'current_preamp_ch0', 'current_preamp_ch2', 'xs_channel1_rois_roi01_value']
xstop = xstart + xnumstep*xstepsize
zstop = zstart + znumstep*zstepsize
print('xstop = '+str(xstop))
print('zstop = '+str(zstop))
for roi_idx in range(numrois):
roi_name = 'roi{:02}'.format(roi_idx+1)
roi_key = getattr(xs.channel1.rois, roi_name).value.name
livetableitem.append(roi_key)
# livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_net_count')
# livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_count')
# #roimap = LiveRaster((xnumstep, znumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_net_count', clim=None, cmap='viridis', xlabel='x', ylabel='y', extent=None)
colormap = 'inferno' #previous set = 'viridis'
# roimap = LiveRaster((znumstep, xnumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_count', clim=None, cmap='inferno',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# roimap = myLiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect='equal',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
roimap = LiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect=0.01,
xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# liveplotfig = plt.figure('through focus')
# roiplot = LivePlot(roi_key,x=hf_stage.x.name, fig=liveplotfig)
livecallbacks.append(roimap)
# livecallbacks.append(roiplot)
# if i0map_show is True:
# i0map = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch2', clim=None, cmap='inferno',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# livecallbacks.append(i0map)
# if itmap_show is True:
# itmap = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch0', clim=None, cmap='inferno',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# livecallbacks.append(itmap)
# commented out liveTable in 2D scan for now until the prolonged time issue is resolved
livecallbacks.append(LiveTable(livetableitem))
#setup the plan
if energy is not None:
if u_detune is not None:
# TODO maybe do this with set
energy.detune.put(u_detune)
# TODO fix name shadowing
yield from bp.abs_set(energy, energy, wait=True)
# shut_b.open_cmd.put(1)
# while (shut_b.close_status.get() == 1):
# epics.poll(.5)
# shut_b.open_cmd.put(1)
hf2dwire_scanplan = bp.grid_scan(det,
hf_stage.z, zstart, zstop, znumstep+1,
hf_stage.x, xstart, xstop, xnumstep+1, True,
md=md)
hf2dwire_scanplan = bp.subs_wrapper(hf2dwire_scanplan, livecallbacks)
scaninfo = yield from hf2dwire_scanplan
# shut_b.close_cmd.put(1)
# while (shut_b.close_status.get() == 0):
# epics.poll(.5)
# shut_b.close_cmd.put(1)
#write to scan log
logscan('2dwire')
return scaninfo
class myLiveRaster(CallbackBase):
"""Simple callback that fills in values based on a raster
This simply wraps around a `AxesImage`. seq_num is used to
determine which pixel to fill in
Parameters
----------
    raster_shape : tuple
The (row, col) shape of the raster
I : str
The field to use for the color of the markers
clim : tuple, optional
The color limits
cmap : str or colormap, optional
The color map to use
"""
def __init__(self, raster_shape, I, *,
clim=None, cmap='viridis',
xlabel='x', ylabel='y', extent=None):
fig, ax = plt.subplots()
self.I = I
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_aspect(.001)
self.ax = ax
self.fig = fig
self._Idata = np.ones(raster_shape) * np.nan
self._norm = mcolors.Normalize()
if clim is not None:
self._norm.vmin, self._norm.vmax = clim
self.clim = clim
self.cmap = cmap
self.raster_shape = raster_shape
self.im = None
self.extent = extent
def start(self, doc):
if self.im is not None:
raise RuntimeError("Can not re-use LiveRaster")
self._Idata = np.ones(self.raster_shape) * np.nan
im = self.ax.imshow(self._Idata, norm=self._norm,
cmap=self.cmap, interpolation='none',
extent=self.extent)
self.im = im
self.ax.set_title('scan {uid} [{sid}]'.format(sid=doc['scan_id'],
uid=doc['uid'][:6]))
self.snaking = doc.get('snaking', (False, False))
cb = self.fig.colorbar(im)
cb.set_label(self.I)
def event(self, doc):
if self.I not in doc['data']:
return
seq_num = doc['seq_num'] - 1
pos = list(np.unravel_index(seq_num, self.raster_shape))
if self.snaking[1] and (pos[0] % 2):
pos[1] = self.raster_shape[1] - pos[1] - 1
pos = tuple(pos)
self._Idata[pos] = doc['data'][self.I]
if self.clim is None:
self.im.set_clim(np.nanmin(self._Idata), np.nanmax(self._Idata))
self.im.set_array(self._Idata)
def hf2dwire_y(*, xstart, xnumstep, xstepsize,
zstart, znumstep, zstepsize,
acqtime, numrois=1, i0map_show=True, itmap_show=False,
energy=None, u_detune=None):
'''
input:
xstart, xnumstep, xstepsize (float)
zstart, znumstep, zstepsize (float)
        acqtime (float): acquisition time to be set for both xspress3 and F460
        numrois (integer): number of ROIs set to display in the live raster scans. This is for display ONLY.
                           The actual number of ROIs saved depends on how many are enabled and set in the read_attr.
                           However, normally one cares only about the raw XRF spectra, which are all saved and will be used for fitting.
        i0map_show (boolean): When set to True, a map of i0 will be displayed in the live raster, default is True
        itmap_show (boolean): When set to True, a map of the transmission diode will be displayed in the live raster, default is True
energy (float): set energy, use with caution, hdcm might become misaligned
u_detune (float): amount of undulator to detune in the unit of keV
'''
#record relevant meta data in the Start document, defined in 90-usersetup.py
md = get_stock_md()
#setup the detector
# TODO do this with configure
current_preamp.exp_time.put(acqtime-0.09)
xs.settings.acquire_time.put(acqtime)
xs.total_points.put((xnumstep+1)*(znumstep+1))
# det = [current_preamp, xs]
det = [xs]
#setup the live callbacks
livecallbacks = []
livetableitem = [hf_stage.y, hf_stage.z, 'current_preamp_ch0', 'current_preamp_ch2', 'xs_channel1_rois_roi01_value']
xstop = xstart + xnumstep*xstepsize
zstop = zstart + znumstep*zstepsize
print('xstop = '+str(xstop))
print('zstop = '+str(zstop))
for roi_idx in range(numrois):
roi_name = 'roi{:02}'.format(roi_idx+1)
roi_key = getattr(xs.channel1.rois, roi_name).value.name
livetableitem.append(roi_key)
# livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_net_count')
# livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_count')
# #roimap = LiveRaster((xnumstep, znumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_net_count', clim=None, cmap='viridis', xlabel='x', ylabel='y', extent=None)
colormap = 'inferno' #previous set = 'viridis'
# roimap = LiveRaster((znumstep, xnumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_count', clim=None, cmap='inferno',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# roimap = myLiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect='equal',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
roimap = LiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect=0.01,
xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# liveplotfig = plt.figure('through focus')
# roiplot = LivePlot(roi_key,x=hf_stage.x.name, fig=liveplotfig)
livecallbacks.append(roimap)
# livecallbacks.append(roiplot)
# if i0map_show is True:
# i0map = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch2', clim=None, cmap='inferno',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# livecallbacks.append(i0map)
# if itmap_show is True:
# itmap = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch0', clim=None, cmap='inferno',
# xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
# livecallbacks.append(itmap)
# commented out liveTable in 2D scan for now until the prolonged time issue is resolved
livecallbacks.append(LiveTable(livetableitem))
#setup the plan
if energy is not None:
if u_detune is not None:
# TODO maybe do this with set
energy.detune.put(u_detune)
# TODO fix name shadowing
yield from bp.abs_set(energy, energy, wait=True)
# shut_b.open_cmd.put(1)
# while (shut_b.close_status.get() == 1):
# epics.poll(.5)
# shut_b.open_cmd.put(1)
hf2dwire_scanplan = bp.grid_scan(det,
hf_stage.z, zstart, zstop, znumstep+1,
hf_stage.y, xstart, xstop, xnumstep+1, True,
md=md)
hf2dwire_scanplan = bp.subs_wrapper( hf2dwire_scanplan, livecallbacks)
scaninfo = yield from hf2dwire_scanplan
# shut_b.close_cmd.put(1)
# while (shut_b.close_status.get() == 0):
# epics.poll(.5)
# shut_b.close_cmd.put(1)
#write to scan log
logscan('2dwire')
return scaninfo
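# Hypothetical invocation sketch (not part of the original profile): the plans above are
# meant to be passed to the bluesky RunEngine defined elsewhere in this IPython profile
# (commonly named RE); all parameter values below are illustrative only.
#
#     RE(hf2dwire(xstart=0.0, xnumstep=20, xstepsize=0.005,
#                 zstart=0.0, znumstep=20, zstepsize=0.005,
#                 acqtime=0.2, numrois=1))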
| bsd-2-clause |
RPGOne/Skynet | scikit-learn-0.18.1/benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
Determined22/Assignments-PatternRecognition-2016Fall | bayesRule_MNIST.py | 1 | 11029 | ## Bayes Rule:Linear Discriminant Function、Quadratic Discriminant Function(with Shrinkage)
import numpy as np
from load_MNIST import load_X, load_y
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import os
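# Reference sketch of the math implemented below (added for orientation; notation
# follows the code: Mu_c is the class mean, Sigma the covariance, prior_c = P(c)):
#   LDF:  g_c(x) = w^T x + b,  with  w = Sigma^{-1} Mu_c  and
#         b = -0.5 * Mu_c^T Sigma^{-1} Mu_c + ln(prior_c)           (shared Sigma)
#   QDF:  g_c(x) = x^T W x + w^T x + b,  with  W = -0.5 * Sigma_c^{-1},
#         w = Sigma_c^{-1} Mu_c,
#         b = -0.5 * Mu_c^T Sigma_c^{-1} Mu_c - 0.5 * ln|Sigma_c| + ln(prior_c)
#   Shrinkage: LDF uses  Sigma <- (1 - beta) * Sigma + beta * I,  while QDF uses
#     Sigma_c <- ((1 - alpha) * n_c * Sigma_c + alpha * n * Sigma_all)
#                / ((1 - alpha) * n_c + alpha * n)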
def inputData():
"""
    Load the training set and the test set
    :return: ndarray
        train_X: feature vectors of all training samples; train_y: labels of all training samples
        test_X: feature vectors of all test samples; test_y: labels of all test samples
"""
trainfile_X = 'MNIST/train-images-idx3-ubyte'
trainfile_y = 'MNIST/train-labels-idx1-ubyte'
testfile_X = 'MNIST/t10k-images-idx3-ubyte'
testfile_y = 'MNIST/t10k-labels-idx1-ubyte'
    if not os.path.exists('train_X.npy'):  # avoid reloading: skip this step if the files already exist
load_X(trainfile_X, 'train_X.npy')
load_y(trainfile_y, 'train_y.npy')
load_X(testfile_X, 'test_X.npy')
load_y(testfile_y, 'test_y.npy')
return np.load('train_X.npy'), np.ravel(np.load('train_y.npy')), \
np.load('test_X.npy'), np.ravel(np.load('test_y.npy'))
def pcaFeatures(train_X, test_X, k):
"""
    Dimensionality reduction of the features
    :param train_X: feature vectors of all training samples
    :param test_X: feature vectors of all test samples
    :param k: target feature dimension after the reduction
    :return: feature vectors of the training and test samples after dimensionality reduction
"""
pca = PCA(n_components=k)
train_X = pca.fit_transform(train_X)
test_X = pca.transform(test_X)
print(sum(pca.explained_variance_ratio_))
return train_X, test_X
def linearDF(train_X, train_y, test_X, test_y, beta=0.0, prior=None):
"""
    Linear discriminant function (LDF), including training and testing
    :param train_X: feature vectors of all training samples
    :param train_y: labels of all training samples
    :param test_X: feature vectors of all test samples
    :param test_y: labels of all test samples
    :param beta: shrinkage parameter (shrinks the covariance matrix towards the identity matrix)
    :param prior: prior probability of each class; computed from the data if not given
    :return: accuracy of the model on the test set
"""
n_samples, n_features = train_X.shape
    ## compute the prior probability of each class
classLabel, y = np.unique(train_y, return_inverse=True)
if prior is None:
prior = np.bincount(y) / float(n_samples)
C = len(classLabel)
    ## I. Training: use MLE to estimate the mean vector of each class
Mu = np.zeros(shape=(C, n_features))
for c in classLabel:
train_Xc = train_X[y==c, :]
Mu[c] = np.mean(train_Xc, axis=0)
    # all classes share one covariance matrix, estimated after normalizing each feature's variance to 1; also compute its inverse
Var = np.var(train_X, axis=0)
for i in range(n_features):
if Var[i] == 0:
Var[i] = 1.0e-4
Sigma = np.cov(train_X / Var, rowvar=False)
    # shrinkage: shrink the covariance matrix towards the identity matrix
Sigma = (1 - beta) * Sigma + beta * np.eye(n_features)
    # if the covariance matrix is singular (only possible when beta keeps its default value 0), force the shrinkage
if np.linalg.matrix_rank(Sigma) < n_features:
Sigma = (1-1.0e-4)*Sigma + 1.0e-4*np.eye(n_features)
Sigma_inv = np.linalg.inv(Sigma)
    ## II. Testing: predict the labels of the test samples
    # define the linear discriminant function
def gc_Xi(Xi, Mu_c, Sigma_inv, prior_c):
        Mu_c.shape = (Mu_c.shape[0], 1)  # reshape the 1-D numpy array to 2-D so that the transpose works
w = np.dot(Sigma_inv, Mu_c)
b = -0.5*np.dot(np.dot(np.transpose(Mu_c),Sigma_inv), Mu_c) + np.log(prior_c)
return np.dot(np.transpose(w), Xi) + b
    # predict the labels of the test samples and compute the accuracy
accuracy = 0
n_testsamples = test_X.shape[0]
g_Xi = np.zeros(shape=(C,))
# test_X = test_X / Var
for i in range(n_testsamples):
Xi = test_X[i]
for c in range(C):
g_Xi[c] = gc_Xi(Xi, Mu[c], Sigma_inv, prior[c])
if np.where(g_Xi==max(g_Xi))[0] == test_y[i]:
accuracy += 1
accuracy /= n_testsamples
return accuracy
def quadraticDF(train_X, train_y, test_X, test_y, alpha=0.0, prior=None):
"""
    Quadratic discriminant function (QDF), including training and testing
    :param train_X: feature vectors of all training samples
    :param train_y: labels of all training samples
    :param test_X: feature vectors of all test samples
    :param test_y: labels of all test samples
    :param alpha: parameter of the shrinkage strategy
    :param prior: prior probability of each class; computed from the data if not given
    :return: accuracy of the model on the test set
"""
n, n_features = train_X.shape
    ## compute the prior probability of each class
classLabel, y = np.unique(train_y, return_inverse=True)
    n_i = np.bincount(y)  # number of samples in each class
if prior is None:
prior = np.bincount(y) / float(n)
C = len(classLabel)
    # precompute the common covariance matrix used by the shrinkage strategy
Var = np.var(train_X, axis=0)
for i in range(n_features):
if Var[i] == 0:
Var[i] = 1.0e-4
Sigma_All = np.cov(train_X / Var, rowvar=False)
if np.linalg.matrix_rank(Sigma_All) < n_features:
Sigma_All = (1 - 1.0e-4)*Sigma_All + 1.0e-4*np.eye(n_features)
    ## I. Training: use MLE to estimate the mean vector and covariance matrix of each class, and invert each covariance matrix
Mu = np.zeros(shape=(C, n_features))
Sigma = np.zeros(shape=(C, n_features, n_features))
Sigma_inv = np.zeros(shape=(C, n_features, n_features))
for c in classLabel:
train_Xc = train_X[y==c, :]
Mu[c] = np.mean(train_Xc, axis=0)
Sigma[c] = np.cov(train_Xc - Mu[c], rowvar=False)
        # shrinkage: shrink each class covariance matrix towards the common covariance matrix
Sigma[c] = ((1 - alpha)*n_i[c]*Sigma[c] + alpha*n*Sigma_All) / ((1 - alpha)*n_i[c] + alpha*n)
        # if the covariance matrix is singular, force the shrinkage
if np.linalg.matrix_rank(Sigma[c]) < n_features:
alpha = 1.0e-4
Sigma[c] = ((1 - alpha)*n_i[c]*Sigma[c] + alpha*n*Sigma_All) / ((1 - alpha)*n_i[c] + alpha*n)
Sigma_inv[c] = np.linalg.inv(Sigma[c])
    ## II. Testing: predict the labels of the test samples
    # define the quadratic discriminant function
def gc_Xi(Xi, Mu_c, Sigma_c, Sigma_inv_c, prior_c):
        Mu_c.shape = (Mu_c.shape[0], 1)  # reshape the 1-D numpy array to 2-D so that the transpose works
W = -0.5 * Sigma_inv_c
w = np.dot(Sigma_inv_c, Mu_c)
        # the matrix is large: np.linalg.det() overflows and so does sign*np.exp(logdet), so take the log of the determinant directly to avoid the overflow
(sign, logdet) = np.linalg.slogdet(Sigma_c)
ln_det_Sigma_c = np.log(sign) + logdet
b = -0.5*np.dot(np.dot(np.transpose(Mu_c),Sigma_inv_c), Mu_c) - 0.5*ln_det_Sigma_c + np.log(prior_c)
return np.dot(np.dot(np.transpose(Xi), W), Xi) + np.dot(np.transpose(w), Xi) + b
    # predict the labels of the test samples and compute the accuracy
accuracy = 0
n_testsamples = test_X.shape[0]
g_Xi = np.zeros(shape=(C,))
for i in range(n_testsamples):
Xi = test_X[i]
for c in range(C):
g_Xi[c] = gc_Xi(Xi, Mu[c], Sigma[c], Sigma_inv[c], prior[c])
if np.where(g_Xi==max(g_Xi))[0] == test_y[i]:
accuracy += 1
accuracy /= n_testsamples
return accuracy
def validationPara(model, train_X, train_y, para_init, para_max, step):
"""
    Hyperparameter tuning, using one tenth of the training set as the validation set
    :param model: the model, LDF or QDF
    :param train_X: feature vectors of the training set
    :param train_y: labels of the training set
    :param para_init: initial value of the hyperparameter
    :param para_max: maximum value of the hyperparameter
    :param step: update step of the hyperparameter
    :return: para_choose: the chosen hyperparameter
             para: the hyperparameter values that were tried
             target: performance on the validation set for each value in para
"""
para_choose = para_init; para = []
    m = int(train_X.shape[0] * 0.9)  # number of samples used for training
max_target = 0; target = []
while para_init <= para_max:
para.append(para_init)
target_v = model(train_X[ :m], train_y[ :m], train_X[m: ], train_y[m: ], para_init)
target.append(target_v)
if target_v > max_target:
para_choose = para_init
max_target = target_v
para_init = round(para_init + step, 2)
return para_choose, para, target
def experimentsVisualize(para, target, title, filename):
"""
    Visualize the performance on the validation set during hyperparameter tuning
    :param para: the hyperparameter values that were tried
    :param target: performance on the validation set for each value in para
    :return: None
"""
target = 100 * np.array(target)
fig1 = plt.figure() # figsize=(10, 8)
ax1 = fig1.add_subplot(111)
ax1.plot(para, target, marker = 's', c='red', lw=1.5, label='$Accuracy$')
index = np.where(target == max(target))[0][0]
plt.text(para[index]-0.03, target[index]+1, str(round(target[index],2)))
plt.legend(loc=4)
plt.xlabel('$' + title + '$')
plt.ylabel('$Accuracy(\%)$')
plt.title('$Accuracy(\%)$' + ' on validation-set with ' + 'Hyperparameter $' + title + '$')
plt.ylim(60, 100)
plt.xlim(min(para)-0.05, max(para)+0.05)
plt.savefig(filename + '.pdf', dpi=400)
# plt.show()
if __name__=='__main__':
train_X, train_y, test_X, test_y = inputData()
train_X, test_X = pcaFeatures(train_X, test_X, 50)
    ## (1) Linear discriminant function (LDF)
    # 1. tune the hyperparameter on the validation set
beta_choose, beta, accuracy = validationPara(linearDF, train_X, train_y, 0.0, 1.0, 0.05)
    # 2. train and test the model with the chosen hyperparameter
accuracy_final = linearDF(train_X, train_y, test_X, test_y, beta=beta_choose)
print('LDF: beta = %.2f, accuracy = %.2f%%' % (beta_choose, 100 * accuracy_final))
experimentsVisualize(beta, accuracy, r'\beta', '5_9')
    # 3. compare with the result of scikit-learn
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDF
from sklearn.metrics import accuracy_score
    ldf = LDF(solver='lsqr')  # the shrinkage in sklearn's LDA is slightly different
ldf.fit(train_X, train_y)
accuracy_final = 100 * accuracy_score(test_y, ldf.predict(test_X))
print('LDF(sklearn): accuracy = %.2f%%' % accuracy_final)
    ## (2) Quadratic discriminant function (QDF)
    # 1. tune the hyperparameter on the validation set
alpha_choose, alpha, accuracy = validationPara(quadraticDF, train_X, train_y, 0.0, 1.0, 0.05)
    # 2. train and test the model with the chosen hyperparameter
accuracy_final = quadraticDF(train_X, train_y, test_X, test_y, alpha=alpha_choose)
print('QDF: alpha = %.2f, accuracy = %.2f%%' % (alpha_choose, 100 * accuracy_final))
experimentsVisualize(alpha, accuracy, r'\alpha', '5_10')
    # 3. compare with scikit-learn; the sklearn implementation is much faster
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDF
    qdf = QDF()  # the shrinkage in sklearn's QDA shrinks directly towards the identity matrix
qdf.fit(train_X, train_y)
accuracy_final = 100 * accuracy_score(test_y, qdf.predict(test_X))
print('QDF(sklearn): accuracy = %.2f%%' % accuracy_final)
| mit |
pprett/scikit-learn | sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
nlholdem/icodoom | ICO1/deep_feedback_learning_old/vizdoom/backprop1.py | 1 | 8991 | #!/usr/bin/python3
from __future__ import print_function
from vizdoom import *
import sys
import threading
import math
from random import choice
from time import sleep
from matplotlib import pyplot as plt
sys.path.append('../../deep_feedback_learning')
import numpy as np
import cv2
import deep_feedback_learning
# Create DoomGame instance. It will run the game and communicate with you.
game = DoomGame()
# Now it's time for configuration!
# load_config could be used to load configuration instead of doing it here with code.
# If load_config is used in-code configuration will also work - most recent changes will add to previous ones.
# game.load_config("../../scenarios/basic.cfg")
# Sets path to additional resources wad file which is basically your scenario wad.
# If not specified default maps will be used and it's pretty much useless... unless you want to play good old Doom.
game.set_doom_scenario_path("./basic.wad")
# Sets map to start (scenario .wad files can contain many maps).
game.set_doom_map("map01")
# Sets resolution. Default is 320X240
game.set_screen_resolution(ScreenResolution.RES_640X480)
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
width = 640
widthNet = 320
height = 480
heightNet = 240
# Sets the screen buffer format. Not used here but now you can change it. Default is CRCGCB.
game.set_screen_format(ScreenFormat.RGB24)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables labeling of in-game objects.
game.set_labels_buffer_enabled(True)
# Enables buffer with top down map of the current episode/level.
game.set_automap_buffer_enabled(True)
# Sets other rendering options
game.set_render_hud(False)
game.set_render_minimal_hud(False) # If hud is enabled
game.set_render_crosshair(True)
game.set_render_weapon(False)
game.set_render_decals(False)
game.set_render_particles(False)
game.set_render_effects_sprites(False)
game.set_render_messages(False)
game.set_render_corpses(False)
# Adds buttons that will be allowed.
# game.add_available_button(Button.MOVE_LEFT)
# game.add_available_button(Button.MOVE_RIGHT)
game.add_available_button(Button.MOVE_LEFT_RIGHT_DELTA, 50)
game.add_available_button(Button.ATTACK)
game.add_available_button(Button.TURN_LEFT_RIGHT_DELTA)
# Adds game variables that will be included in state.
game.add_available_game_variable(GameVariable.AMMO2)
# Causes episodes to finish after 200 tics (actions)
game.set_episode_timeout(500)
# Makes episodes start after 10 tics (~after raising the weapon)
game.set_episode_start_time(10)
# Makes the window appear (turned on by default)
game.set_window_visible(True)
# Turns on the sound. (turned off by default)
game.set_sound_enabled(True)
# Sets the living reward (for each move) to -1
game.set_living_reward(-1)
# Sets ViZDoom mode (PLAYER, ASYNC_PLAYER, SPECTATOR, ASYNC_SPECTATOR, PLAYER mode is default)
game.set_mode(Mode.PLAYER)
# Enables engine output to console.
#game.set_console_enabled(True)
nFiltersInput = 3
nFiltersHidden = 3
minT = 3
maxT = 30
nHidden0 = 4
nHidden1 = 2
net = deep_feedback_learning.DeepFeedbackLearning(widthNet*heightNet,[nHidden0*nHidden0,nHidden1*nHidden1], 1, nFiltersInput, nFiltersHidden, minT,maxT)
net.getLayer(0).setConvolution(widthNet,heightNet)
net.getLayer(1).setConvolution(nHidden0,nHidden0)
net.initWeights(0.5,0,deep_feedback_learning.Neuron.MAX_OUTPUT_RANDOM);
net.setLearningRate(0)
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.backprop);
# net.getLayer(0).setInputNorm2ZeroMean(128,256)
net.getLayer(0).setLearningRate(1E-10)
net.getLayer(1).setLearningRate(0.00001)
net.getLayer(2).setLearningRate(0.001)
#net.getLayer(1).setNormaliseWeights(True)
#net.getLayer(2).setNormaliseWeights(True)
net.setUseDerivative(0)
net.setBias(1)
# Initialize the game. Further configuration won't take any effect from now on.
game.init()
# Run this many episodes
episodes = 1000
# Sets time that will pause the engine after each action (in seconds)
# Without this everything would go too fast for you to keep track of what's happening.
sleep_time = 1.0 / DEFAULT_TICRATE # = 0.028
delta2 = 0
dontshoot = 1
deltaZeroCtr = 1
inp = np.zeros(widthNet*heightNet)
sharpen = np.array((
[0, 1, 0],
[1, 4, 1],
[0, 1, 0]), dtype="int")
edge = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
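# Added annotation (not in the original script): the kernel named "edge" is the standard
# 5-point discrete Laplacian (centre -4, four neighbours +1); the kernel named "sharpen"
# sums the four neighbours with a x4 centre weight. Both are applied to the screen buffer
# with cv2.filter2D further below.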
plt.ion()
plt.show()
ln1 = False
ln2 = [False,False,False,False]
def getWeights2D(neuron):
n_neurons = net.getLayer(0).getNneurons()
n_inputs = net.getLayer(0).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
if net.getLayer(0).getNeuron(neuron).getMask(i):
weights[i] = net.getLayer(0).getNeuron(neuron).getAvgWeight(i)
else:
weights[i] = np.nan
return weights.reshape(heightNet,widthNet)
def getWeights1D(layer,neuron):
n_neurons = net.getLayer(layer).getNneurons()
n_inputs = net.getLayer(layer).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
weights[i] = net.getLayer(layer).getNeuron(neuron).getAvgWeight(i)
return weights
def plotWeights():
global ln1
global ln2
while True:
if ln1:
ln1.remove()
plt.figure(1)
w1 = getWeights2D(0)
for i in range(1,net.getLayer(0).getNneurons()):
w2 = getWeights2D(i)
w1 = np.where(np.isnan(w2),w1,w2)
ln1 = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
for j in range(1,3):
if ln2[j]:
ln2[j].remove()
plt.figure(j+1)
w1 = np.zeros( (net.getLayer(j).getNneurons(),net.getLayer(j).getNeuron(0).getNinputs()) )
for i in range(0,net.getLayer(j).getNneurons()):
w1[i,:] = getWeights1D(j,i)
ln2[j] = plt.imshow(w1,cmap='gray')
plt.draw()
plt.pause(0.1)
t1 = threading.Thread(target=plotWeights)
t1.start()
for i in range(episodes):
print("Episode #" + str(i + 1))
# Starts a new episode. It is not needed right after init() but it doesn't cost much. At least the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
# Gets the state
state = game.get_state()
# Which consists of:
n = state.number
vars = state.game_variables
screen_buf = state.screen_buffer
depth_buf = state.depth_buffer
labels_buf = state.labels_buffer
automap_buf = state.automap_buffer
labels = state.labels
midlinex = int(width/2);
midliney = int(height*0.75);
crcb = screen_buf
screen_left = screen_buf[100:midliney,0:midlinex-1,2]
screen_right = screen_buf[100:midliney,midlinex+1:(width-1),2]
screen_left = cv2.filter2D(screen_left, -1, sharpen);
screen_right = cv2.filter2D(screen_right, -1, sharpen);
# cv2.imwrite('/tmp/left.png',screen_left)
# cv2.imwrite('/tmp/right.png',screen_right)
lavg = np.average(screen_left)
ravg = np.average(screen_right)
delta = (lavg - ravg)*15
dd = delta - delta2
delta2 = delta
# print(delta)
        # Perform the action and collect the reward.
shoot = 0
if (dontshoot > 1) :
dontshoot = dontshoot - 1
else :
if (abs(dd) < 10) :
shoot = 1
dontshoot = 60
deltaZeroCtr = 4
if deltaZeroCtr>0:
deltaZeroCtr = deltaZeroCtr - 1
delta = 0
blue = cv2.resize(crcb, (widthNet,heightNet));
blue = blue[:,:,2]
blue = cv2.filter2D(blue, -1, edge);
err = np.linspace(delta,delta,nHidden0*nHidden0);
net.doStep(blue.flatten()/512-0.5,err[:1])
#weightsplot.set_xdata(np.append(weightsplot.get_xdata(),n))
#weightsplot.set_ydata(np.append(weightsplot.get_ydata(),net.getLayer(0).getWeightDistanceFromInitialWeights()))
output = net.getOutput(0)*5
print(delta,output,
net.getLayer(0).getWeightDistanceFromInitialWeights(),"\t",
net.getLayer(1).getWeightDistanceFromInitialWeights(),"\t",
net.getLayer(2).getWeightDistanceFromInitialWeights())
# action[0] is translating left/right; action[2] is rotating/aiming
# action = [ delta+output , shoot, 0. ]
action = [ 0., shoot, (delta+output)*0.1 ]
r = game.make_action(action)
# if sleep_time > 0:
# sleep(sleep_time)
# Check how the episode went.
print("Episode finished.")
print("Total reward:", game.get_total_reward())
print("************************")
sleep(1)
# It will be done automatically anyway but sometimes you need to do it in the middle of the program...
game.close()
| gpl-3.0 |
abhisg/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
mhdella/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
MMKrell/pyspace | pySPACE/missions/nodes/visualization/feature_vector_vis.py | 2 | 18462 | """ Visualize :class:`~pySPACE.resources.data_types.feature_vector.FeatureVector` elements"""
import itertools
import os
import warnings
import pylab
import numpy
from collections import defaultdict
from pySPACE.resources.data_types.prediction_vector import PredictionVector
from pySPACE.tools.filesystem import create_directory
try:
import mdp.nodes
except:
pass
from pySPACE.missions.nodes.base_node import BaseNode
class LLEVisNode(BaseNode):
""" Show a 2d scatter plot of all :class:`~pySPACE.resources.data_types.feature_vector.FeatureVector` based on Locally Linear Embedding (LLE) from MDP
This node collects all training examples it obtains along with their
label. It computes than an embedding of all these examples in a 2d space
using the "Locally Linear Embedding" algorithm and plots a scatter plot of
the examples in this space.
**Parameters**
:neighbors:
The number of neighbor vectors that should be considered for each
instance during locally linear embedding
(*optional, default: 15*)
**Exemplary Call**
.. code-block:: yaml
-
node : Time_Series_Source
-
node : All_Train_Splitter
-
node : Time_Domain_Features
-
node : LLE_Vis
parameters :
neighbors : 10
-
node : Nil_Sink
Known Issues:
This node will use pylab.show() to show the figure. There is no store
method implemented yet. On Macs, pylab.show() might sometimes fail due to
a wrong plotting backend. A possible workaround in that case is to
manually set the plotting backend to 'MacOSX'. This has to be done before
pylab is imported, so one can temporarily add "import matplotlib;
matplotlib.use('MacOSX')" to the very beginning of launch.py.
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2009/07/07
"""
input_types = ["FeatureVector"]
def __init__(self, neighbors = 15, **kwargs):
super(LLEVisNode, self).__init__(**kwargs)
self.set_permanent_attributes(
neighbors = neighbors,
# A set of colors that can be used to distinguish different classes
colors = set(["r", "b"]),
# A mapping from class label to its color in the plot
class_colors = dict(),
# Remembers the classes (colors) of the instances seen
instance_colors = [],
#
instances = []
)
pylab.ion()
figure = pylab.figure(figsize=(21, 11))
figure.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top= 0.99,
wspace=0.2, hspace=0.2)
pylab.draw()
def is_trainable(self):
""" Returns whether this node is trainable. """
# Though this node is not really trainable, it returns true in order
# to get trained. The reason is that during this training phase,
        # it visualizes all samples that are passed as arguments
return True
def is_supervised(self):
""" Returns whether this node requires supervised training """
return True
def _get_train_set(self, use_test_data):
""" Returns the data that can be used for training """
# We take data that is provided by the input node for training
# NOTE: This might involve training of the preceding nodes
train_set = self.input_node.request_data_for_training(use_test_data)
# Add the data provided by the input node for testing to the
# training set
# NOTE: This node is not really learning but just collecting all
# examples. Because of that it must take
# all data for training (even when use_test_data is False)
train_set = itertools.chain(train_set,
self.input_node.request_data_for_testing())
return train_set
def _train(self, data, label):
"""
This node is not really trained but uses the labeled examples to
generate a scatter plot.
"""
# Determine color of this class if not yet done
if label not in self.class_colors.keys():
self.class_colors[label] = self.colors.pop()
        # Store the given example along with its class (encoded in the color)
self.instances.append(data)
self.instance_colors.append(self.class_colors[label])
def _stop_training(self, debug=False):
""" Stops the training, i.e. create the 2d representation
Uses the Locally Linear Embedding algorithm to create a 2d
representation of the data and creates a 2d scatter plot.
"""
instances = numpy.vstack(self.instances)
# Compute LLE and project the data
lle_projected_data = mdp.nodes.LLENode(k=self.neighbors,
output_dim=2)(instances)
# Create scatter plot of the projected data
pylab.scatter(lle_projected_data[:,0], lle_projected_data[:,1],
c = self.instance_colors)
pylab.show()
def _execute(self, data):
# We simply pass the given data on to the next node
return data
class MnistVizNode(BaseNode):
""" Node for plotting MNIST Data
**Parameters**
:mode:
One of *FeatureVector*, *PredictionVector*, and *nonlinear*.
If *FeatureVector* is taken, the data is assumed to be in the
28x28 format and can be visualized like the original data.
If *PredictionVector* is chosen, the affine
backtransformation approach is used. If possible,
the visualization is enhanced by the average data found in
the data history at the *history_index*.
            If *nonlinear* is used, a nonlinear processing chain is assumed
            and the backtransformation is calculated with derivatives
            evaluated at the sample taken from the data history at the
            *history_index*.
If not specified, the input data type is used.
(*recommended, default: input type*)
:history_index:
Index for determining the averaging data or the data for
calculating the derivative from prediction vectors.
To save the respective data, the *keep_in_history* parameter
has to be used, in the node, which produces the needed data.
This can be a Noop node at the beginning.
By default the last stored sample is used.
(*recommended, default: last sample*)
:max_samples:
In case of the *nonlinear* mode, a backtransformation
graphic must be generated for every data sample.
            To reduce memory usage, only the first *max_samples* training
samples are used.
(*optional, default: 10*)
**Exemplary Call**
.. code-block:: yaml
- node : MnistViz
"""
def __init__(self, mode=None, history_index=0, max_samples=10, **kwargs):
super(MnistVizNode, self).__init__(**kwargs)
self.set_permanent_attributes(
averages=None,
counter=None,
mode=mode,
history_index=history_index,
inputs=None,
max_samples=max_samples,
)
def _train(self, data, label):
""" Average data with labels (no real training)"""
if self.mode is None:
self.mode = type(data).__name__
if (self.mode == "PredictionVector") and data.has_history():
new_data = data.history[self.history_index - 1]
del(data)
data = new_data
if self.mode == "nonlinear":
if self.inputs is None:
self.inputs = []
self.inputs.append(data)
if self.mode in "FeatureVector" or (
(self.mode == "PredictionVector")
and not type(data) == PredictionVector):
if self.averages is None or self.counter is None:
self.averages = defaultdict(lambda : numpy.zeros((28, 28)))
self.counter = defaultdict(float)
# Average the given example along with its class
data.view(numpy.ndarray)
number_array = data.reshape((28,28))
self.averages[label] += number_array
self.counter[label] += 1
if self.inputs is None:
self.inputs = []
if not len(self.inputs) == self.max_samples:
self.inputs.append(number_array)
def store_state(self, result_dir, index=None):
""" Main method which generates and stores the graphics """
if self.store:
#set the specific directory for this particular node
node_dir = os.path.join(result_dir, self.__class__.__name__)
#do we have an index-number?
if not index is None:
#add the index-number...
node_dir += "_%d" % int(index)
create_directory(node_dir)
colors = ["white", "black", "blue", "red"]
if self.mode == "FeatureVector":
for label in self.averages:
self.averages[label] *= 1.0/self.counter[label]
#http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
pylab.figure(figsize=(4, 4), dpi=300)
pylab.contourf(self.averages[label], 50, cmap="jet",
origin="image")
pylab.xticks(())
pylab.yticks(())
#pylab.colorbar()
f_name = str(node_dir)+str(os.sep)+str(label)+"average"
pylab.savefig(f_name + ".png", bbox_inches='tight')
for index, input in enumerate(self.inputs):
pylab.figure(figsize=(4, 4), dpi=300)
pylab.contourf(input, 50, cmap="binary",
origin="image")
pylab.xticks(())
pylab.yticks(())
#pylab.colorbar()
f_name = str(node_dir)+str(os.sep)+"sample"+str(index)
pylab.savefig(f_name + ".png", bbox_inches='tight')
elif self.mode == "PredictionVector":
trafos = self.get_previous_transformations()[-1]
trafo = trafos[0]
trafo.view(numpy.ndarray)
covariance = trafos[1][1]
trafo_covariance = numpy.dot(covariance, trafo.flatten())
# covariance free picture
number_array = trafo.reshape((28, 28))
fig = pylab.figure(figsize=(4, 4), dpi=300)
pylab.contourf(number_array, 50, cmap="jet", origin="image",
vmax=abs(number_array).max(),
vmin=-abs(number_array).max())
pylab.xticks(())
pylab.yticks(())
#pylab.colorbar()
if not self.averages is None:
for label in self.averages:
self.averages[label] *= 1.0/self.counter[label]
pylab.contour(
self.averages[label],
levels=[50],
colors=colors[self.averages.keys().index(label)],
linewidths=3,
origin="image")
f_name = str(node_dir)+str(os.sep)+"classifier"
pylab.savefig(f_name + ".png", bbox_inches='tight')
pylab.close(fig)
# covariance picture (similar code as before)
number_array = trafo_covariance.reshape((28, 28))
fig = pylab.figure(figsize=(4, 4), dpi=300)
pylab.contourf(number_array, 50, cmap="jet", origin="image",
vmax=abs(number_array).max(),
vmin=-abs(number_array).max())
pylab.xticks(())
pylab.yticks(())
#pylab.colorbar()
if not self.averages is None:
for label in self.averages:
pylab.contour(
self.averages[label],
levels=[50],
linewidths=3,
colors=colors[self.averages.keys().index(label)],
origin="image")
f_name = str(node_dir)+str(os.sep)+"classifier_cov"
pylab.savefig(f_name + ".png", bbox_inches='tight')
pylab.close(fig)
elif self.mode == "nonlinear":
from matplotlib.backends.backend_pdf import PdfPages
import datetime
with PdfPages(str(node_dir)+str(os.sep)+'sample_vis.pdf') as pdf:
index = 0
for sample in self.inputs:
index += 1
base_vector = sample.history[self.history_index-1]
trafos = self.get_previous_transformations(base_vector)[-1]
trafo = trafos[0]
trafo.view(numpy.ndarray)
covariance = trafos[1][1]
trafo_covariance = \
numpy.dot(covariance, trafo.flatten())
covariance_array = trafo_covariance.reshape((28, 28))
base_array = base_vector.reshape((28, 28))
trafo_array = trafo.reshape((28, 28))
#fig = pylab.figure(figsize=(5, 5), dpi=300)
#pylab.suptitle(sample.label)
# SUBPLOT 1: plot of the derivative
#pylab.subplot(2, 2, 1)
#pylab.title("Backtransformation")
fig = pylab.figure(figsize=(4, 4), dpi=300)
pylab.contourf(trafo_array, 50, cmap="jet",
origin="image",
vmax=abs(trafo_array).max(),
vmin=-abs(trafo_array).max())
pylab.xticks(())
pylab.yticks(())
# pylab.colorbar()
pylab.contour(
base_array,
levels=[50],
colors=colors[1],
origin="image")
# store and clean
f_name = str(node_dir) + str(os.sep) + "classifier_" \
+ str(index)
pylab.savefig(f_name + ".png", bbox_inches='tight')
pylab.close(fig)
fig = pylab.figure(figsize=(4, 4), dpi=300)
# SUBPLOT 2: plot of the derivative multiplied with covariance
# pylab.subplot(2,2,2)
# pylab.title("Backtransformation times Covariance")
pylab.contourf(covariance_array, 50, cmap="jet",
origin="image",
vmax=abs(covariance_array).max(),
vmin=-abs(covariance_array).max())
pylab.xticks(())
pylab.yticks(())
# pylab.colorbar()
pylab.contour(
base_array,
levels=[50],
colors=colors[1],
origin="image")
# # SUBPLOT 2: plot of the original feature vector
# pylab.subplot(2,2,2)
# pylab.title("Original data")
#
# pylab.contourf(base_array, 50, cmap="binary", origin="image")
# pylab.xticks(())
# pylab.yticks(())
# pylab.colorbar()
# # SUBPLOT 3: plot of the difference between vectors
# pylab.subplot(2,2,3)
# pylab.title("Addition")
#
# pylab.contourf(trafo_array+base_array, 50, cmap="spectral", origin="image")
# pylab.xticks(())
# pylab.yticks(())
# pylab.colorbar()
#
# # SUBPLOT 4: plot of the difference between vectors
# pylab.subplot(2,2,4)
# pylab.title("Subtraction")
#
# pylab.contourf(base_array-trafo_array, 50, cmap="spectral", origin="image")
# pylab.xticks(())
# pylab.yticks(())
# pylab.colorbar()
# pdf.savefig(fig, bbox_inches='tight')
# store and clean
f_name = str(node_dir) + str(os.sep) + \
"classifier_cov_" + str(index)
pylab.savefig(f_name + ".png", bbox_inches='tight')
pylab.close(fig)
if index == self.max_samples:
break
# d = pdf.infodict()
# d['Title'] = 'Sample visualization'
# # d['Author'] = ''
# # d['Subject'] = ''
# # d['Keywords'] = ''
# d['CreationDate'] = datetime.datetime.today()
# d['ModDate'] = datetime.datetime.today()
pylab.close('all')
def is_trainable(self):
""" Labels are required for visualization """
return True
def is_supervised(self):
""" Labels are required for visualization """
return True
_NODE_MAPPING = {"LLE_Vis": LLEVisNode}
| gpl-3.0 |
paultopia/auto-sklearn | autosklearn/estimators.py | 5 | 4834 | import os
import random
import shutil
import numpy as np
import autosklearn.automl
from autosklearn.constants import *
class AutoSklearnClassifier(autosklearn.automl.AutoML):
"""This class implements the classification task. It must not be pickled!
Parameters
----------
time_left_for_this_task : int, optional (default=3600)
Time limit in seconds for the search for appropriate classification
models. By increasing this value, *auto-sklearn* will find better
configurations.
per_run_time_limit : int, optional (default=360)
Time limit for a single call to machine learning model.
initial_configurations_via_metalearning : int, optional (default=25)
ensemble_size : int, optional (default=50)
ensemble_nbest : int, optional (default=50)
seed : int, optional (default=1)
ml_memory_limit : int, optional (3000)
        Memory limit for the machine learning algorithm. If the machine
        learning algorithm tries to allocate more memory, its evaluation
        will be stopped.
"""
def __init__(self, time_left_for_this_task=3600,
per_run_time_limit=360,
initial_configurations_via_metalearning=25,
ensemble_size=50, ensemble_nbest=50, seed=1,
ml_memory_limit=3000):
random_number = random.randint(0, 10000)
pid = os.getpid()
output_dir = "/tmp/autosklearn_output_%d_%d" % (pid, random_number)
tmp_dir = "/tmp/autosklearn_tmp_%d_%d" % (pid, random_number)
os.makedirs(output_dir)
os.makedirs(tmp_dir)
super(AutoSklearnClassifier, self).__init__(
tmp_dir, output_dir, time_left_for_this_task, per_run_time_limit,
log_dir=tmp_dir,
initial_configurations_via_metalearning=initial_configurations_via_metalearning,
ensemble_size=ensemble_size, ensemble_nbest=ensemble_nbest,
seed=seed, ml_memory_limit=ml_memory_limit)
def __del__(self):
self._delete_output_directories()
def _create_output_directories(self):
os.makedirs(self.output_dir)
os.makedirs(self.tmp_dir)
def _delete_output_directories(self):
shutil.rmtree(self.tmp_dir)
shutil.rmtree(self.output_dir)
def fit(self, X, y, metric='acc_metric', feat_type=None):
"""Fit *autosklearn* to given training set (X, y).
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target classes.
metric : str, optional (default='acc_metric')
The metric to optimize for. Can be one of: ['acc_metric',
'auc_metric', 'bac_metric', 'f1_metric', 'pac_metric']
feat_type : list, optional (default=None)
            List of length X.shape[1] describing whether each attribute is
            continuous or categorical. Categorical attributes will
            automatically be one-hot encoded.
"""
# Fit is supposed to be idempotent!
self._delete_output_directories()
self._create_output_directories()
y = np.atleast_1d(y)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, which
            # [:, np.newaxis] would not do
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
self.n_classes_ = np.array(self.n_classes_, dtype=np.int)
if self.n_outputs_ > 1:
task = MULTILABEL_CLASSIFICATION
else:
if len(self.classes_[0]) == 2:
task = BINARY_CLASSIFICATION
else:
task = MULTICLASS_CLASSIFICATION
# TODO: fix metafeatures calculation to allow this!
if y.shape[1] == 1:
y = y.flatten()
return super(AutoSklearnClassifier, self).fit(X, y, task, metric,
feat_type)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
return super(AutoSklearnClassifier, self).predict(X)
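# Illustrative usage sketch (added; not part of the original module). X and y are
# placeholders for a user-supplied feature matrix and label vector:
#
#     from autosklearn.estimators import AutoSklearnClassifier
#     cls = AutoSklearnClassifier(time_left_for_this_task=600, per_run_time_limit=60)
#     cls.fit(X, y, metric='acc_metric')
#     y_hat = cls.predict(X)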
class AutoSklearnRegressor(autosklearn.automl.AutoML):
def __init__(self, **kwargs):
raise NotImplementedError() | bsd-3-clause |
ak681443/mana-deep | server/ManaDeepServer/src/model.py | 1 | 4532 |
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
import sys
import cStringIO
import base64
from PIL import Image
import cv2
from StringIO import StringIO
import numpy as np
import json
global model_load_status
model_load_status = False
th = 70
v = 20
model_file = '/home/arvind/MyStuff/Desktop/Manatee_dataset/allmods/new_train/model_iter.h5'
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
masks = np.zeros((224,224))
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<th] = v
img1[img1>=th] = 0
masks = masks + img1
masks = masks / v
if not model_load_status:
input_img = Input(shape=(224, 224,1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
model.compile(loss='binary_crossentropy', optimizer='adagrad', verbose=0)
# In[4]:
model.load_weights(model_file, by_name=True)
model_load_status = True
def push_pqueue(queue, priority, value):
if len(queue)>20:
heapq.heappushpop(queue, (priority, value))
else:
heapq.heappush(queue, (priority, value))
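# Added note (not in the original file): push_pqueue keeps a bounded min-heap of roughly
# the 20 best (score, filename) pairs -- heapq.heappushpop evicts the lowest score once the
# heap is full -- so heapq.nlargest(len(pqueue), pqueue) in predict() returns the matches
# sorted by descending cosine similarity.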
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test = np.reshape(X_test, (len(X_test), 224, 224, 1))
X_test_pred = model.predict(X_test, verbose=0)
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train), 224, 224, 1))
X_train_pred = model.predict(X_train, verbose=0)
mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
print 'model loaded ready to serve'
def predict(img_png_b64):
pqueue = []
tempimg = cStringIO.StringIO(img_png_b64.decode('base64'))
img1 = Image.open(tempimg)
img1 = np.array(img1)
img1 = cv2.resize(img1, (224,224))
img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_train = []
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train), 224, 224, 1))
pred = model.predict(X_train, verbose=0)[0]
msk = cv2.resize(img1, (28, 28))
msk = np.repeat(msk[:, :, np.newaxis], 8, axis=2)
msk = msk.flatten()
pred = pred.flatten()
pred[msk!=0] = 5
for j in np.arange(0, len(files)):
filen = files[j]
tpred = X_train_pred[j].flatten()
tpred[msk!=0] = tpred[msk!=0] * 5
score = 1 - spatial.distance.cosine(tpred, pred)
push_pqueue(pqueue, score, filen)
return json.dumps(heapq.nlargest(len(pqueue), pqueue)) | apache-2.0 |
trankmichael/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
TimBizeps/BachelorAP | FP 2018/V64 Interferometrie/kontrast.py | 1 | 1369 | import numpy as np
import matplotlib.pyplot as plt
import uncertainties.unumpy as unp
from uncertainties import ufloat
from scipy.optimize import curve_fit
theta_P, U_min, U_max = np.genfromtxt('kontrast.txt', unpack=True)
theta_P = theta_P / 360 * (2*np.pi)
Z = U_max-U_min
N = U_max+U_min
def Fitf(theta, a, b, c, d):
return np.abs(a*np.sin(b*theta + c)) + d
Kontr = Z/N
params, cov = curve_fit(Fitf, theta_P, Kontr, )
errors = np.sqrt(np.diag(cov))
a = ufloat(params[0], errors[0])
b = ufloat(params[1], errors[1])
c = ufloat(params[2], errors[2])
d = ufloat(params[3], errors[3])
x = np.linspace(-0.4, 3.5, 1000)
theta = (np.pi/2 - c)/b
print(a)
print(b)
print(c)
print(d)
print(theta * 360 / (2 * np.pi))
plt.plot(theta_P, Kontr, 'r+', label="Daten")
plt.plot(x, Fitf(x, *params), 'b', label="Regression")
plt.xlabel(r"$\theta_P \, / \, \mathrm{rad}$")
plt.ylabel('K')
plt.xticks([0, 0.5*np.pi, np.pi], ['0', r'$\frac{\pi}{2}$', r'$\pi$'])
plt.xlim(-0.4, 3.5)
plt.ylim(0, 1)
# plt.xlim(0, 210)
plt.tight_layout()
plt.legend(loc="best")
plt.savefig("Kontrast.pdf")
plt.clf()
np.savetxt('kontrastplot.txt', np.column_stack([
theta_P * 180/np.pi,
U_max,
U_min,
Kontr,
]), delimiter=' & ', newline=r' \\'+'\n',
fmt='%.0f & %.2f & %.2f & %.2f')
| gpl-3.0 |
ZenDevelopmentSystems/pedal | vis.py | 1 | 5005 | """Visualization objects
"""
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def gallery_gray_patches(W,show=False, rescale=False):
"""Create a gallery of image patches from <W>, with
grayscale patches aligned along columns"""
n_vis, n_feats = W.shape;
n_pix = np.sqrt(n_vis)
n_rows = np.floor(np.sqrt(n_feats))
n_cols = np.ceil(n_feats / n_rows)
border_pix = 1;
# INITIALIZE GALLERY CONTAINER
im_gallery = np.nan*np.ones((border_pix+n_rows*(border_pix+n_pix),
border_pix+n_cols*(border_pix+n_pix)))
for iw in xrange(n_feats):
# RESCALE EACH IMAGE
W_tmp = W[:,iw].copy()
if rescale:
W_tmp = (W_tmp - W_tmp.mean())/np.max(np.abs(W_tmp));
W_tmp = W_tmp.reshape(n_pix, n_pix)
# FANCY INDEXING INTO IMAGE GALLERY
im_gallery[border_pix + np.floor(iw/n_cols)*(border_pix+n_pix):
border_pix + (1 + np.floor(iw/n_cols))*(border_pix+n_pix) - border_pix,
border_pix + np.mod(iw,n_cols)*(border_pix+n_pix):
border_pix + (1 + np.mod(iw,n_cols))*(border_pix+n_pix) - border_pix] = W_tmp
if show:
plt.imshow(im_gallery,interpolation='none')
plt.axis("image")
plt.axis("off")
return im_gallery
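# Illustrative call sketch (added; not from the original module): tile 16 random 8x8
# grayscale patches stored column-wise in a (64, 16) matrix.
#
#     demo_W = np.random.rand(64, 16)   # hypothetical weight matrix, one patch per column
#     gallery_gray_patches(demo_W, show=True, rescale=True)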
def gallery_rgb_patches(W,show=False, rescale=False):
"""Create a gallery of image patches from <W>, with
rgb patches aligned along columns"""
n_vis, n_feats = W.shape;
n_pix = np.sqrt(n_vis/3)
n_rows = np.floor(np.sqrt(n_feats))
n_cols = np.ceil(n_feats / n_rows)
border_pix = 1;
# INITIALIZE GALLERY CONTAINER
im_gallery = np.nan*np.ones((border_pix+n_rows*(border_pix+n_pix),
border_pix+n_cols*(border_pix+n_pix),3))
for iw in xrange(n_feats):
# RESCALE EACH IMAGE
W_tmp = W[:,iw].copy()
W_tmp = W_tmp.reshape(3, n_pix, n_pix).transpose([1,2,0])
if rescale:
for c in xrange(3):
cols = W_tmp[:,:,c]
W_tmp[:,:,c] = (cols - cols.mean())/np.max(np.abs(cols))
# FANCY INDEXING INTO IMAGE GALLERY
im_gallery[border_pix + np.floor(iw/n_cols)*(border_pix+n_pix):
border_pix + (1 + np.floor(iw/n_cols))*(border_pix+n_pix) - border_pix,
border_pix + np.mod(iw,n_cols)*(border_pix+n_pix):
border_pix + (1 + np.mod(iw,n_cols))*(border_pix+n_pix) - border_pix,:] = W_tmp
if show:
plt.imshow(im_gallery,interpolation='none')
plt.axis("image")
plt.axis("off")
class RBMTraining(object):
"""General RBM Training Object"""
def __init__(self, cmap='jet'):
self.fig = plt.figure("RBM Learning")
# INITIALIZE PLOTS
self.W_ax = self.fig.add_subplot(221, title="W")
self.W_plot = self.W_ax.imshow(np.random.rand(2,2),
interpolation='none')
self.dW_ax = self.fig.add_subplot(222, title="dW")
self.dW_plot = self.dW_ax.imshow(np.random.rand(2,2),
interpolation='none')
self.a_hid_ax = self.fig.add_subplot(223, title="Hidden Activations")
self.a_hid_plot = self.a_hid_ax.hist(np.random.rand(200),bins=25)
self.err_ax = self.fig.add_subplot(224, title="Recon. Error")
self.err_plot = self.err_ax.plot(range(10),'r')
# PROBABLY A BETTER WAY OF DOING THIS, i.e. AxesStack?
self.axis_names = ['W_ax', 'dW_ax','a_hid_ax', 'err_ax']
def close(self):
plt.close(self.fig)
def set_data(self, data):
"""Given a dict of axis labels and data, update axes"""
for k, v in data.items():
print ('key', k, 'value', v)
            try:
                self.__getattribute__(k).set_data(v)
            except Exception:
                try:
                    self.__getattribute__(k).set_ydata(v)
                except Exception:
                    print 'data update failed: %s' % k
self.refresh()
def refresh(self):
self.fig.canvas.draw()
# self.fig.show()
def visibility(self, visibility=True):
print self.axis_names
for ax_name in self.axis_names:
try:
self.__getattribute__(ax_name).set_visible(visibility)
except: pass
self.refresh()
class RBMTrainingMNIST(RBMTraining):
def __init__(self):
super(RBMTrainingMNIST, self).__init__()
plt.set_cmap("gray")
def vis(self, trainer):
print 'updating data'
data = {'W_plot': gallery_gray_patches(trainer.rbm.W),
'dW_plot': gallery_gray_patches(trainer.log['gradients']['dW'])}
# 'err_plot': np.array(trainer.log['error'])}
# 'a_hid_plot': trainer.log['states']['a_hid']}
print 'setting data/ refreshing'
self.set_data(data)
self.refresh()
| bsd-2-clause |
metabolite-atlas/metatlas | metatlas/helpers/rt_corrector.py | 2 | 12988 | from datetime import datetime
from metatlas import metatlas_objects as metob
from metatlas.helpers import metatlas_get_data_helper_fun as ma_data
from metatlas import gui
import pandas as pd
import os
import os.path
import sys
from IPython.display import display
import matplotlib.pyplot as plt
try:
import ipywidgets as widgets
except ImportError:
from IPython.html import widgets
try:
import traitlets
except ImportError:
from IPython.utils import traitlets
from ipywidgets import interact, fixed, FloatSlider
try:
foo = widgets.Select()
except Exception as e:
print(e)
sys.exit(0)
data = []
groups = []
file_names = []
compound_names = []
compound_objects = []
files_idx = dict()
compound_idx = dict()
groups_idx = dict()
grid = gui.create_qgrid([])
grid2 = gui.create_qgrid([])
compounds_list_dict = dict()
atlas_header = ['Atlas Name', 'No. Compounds', 'Last Modified']
compound_header = ['Compound', 'rt_max', 'rt_min', 'rt_peak', 'rt_units', 'mz', 'mz_tolerance', 'mz_tolerance_units',
'lcms_run']
# --------------------------------------------------------------------------------------------------------------------
# --------------------- W I D G E T S ---------------- W I D G E T S -------------------- W I D G E T S --------------
# --------------------------------------------------------------------------------------------------------------------
# single select widget for atlas
atlases_select = widgets.Select(description='Atlases:', options=[])
# text widget to select for atlases in a database. accepts wild cards
search_string = widgets.Text(description="")
search_button = widgets.Button(description="Search for Atlas")
display_compounds_and_files = widgets.Button(description="Display Compounds & Files")
# single select widget for the compound
wcompounds = widgets.Select(
description="compounds",
options=[]
)
# multiple select widget for the files
wfiles = widgets.Select(
description="files",
options=[]
)
# dill file name
wfname = widgets.Text(
description='Atlas Name',
value='myAtlas',
)
# button that plots the intensity
plot_button = widgets.Button(description='Plot me')
save_atlas_button = widgets.Button(description="Save Atlas")
save_as_button = widgets.Button(description="Save Atlas As")
save_atlas_as_txt = widgets.Text()
# radio buttons to control how atlas is to be written
choose_output = widgets.RadioButtons(description='choose output:',
options=['new atlas name', 'old atlas name'],
)
# button that creates the atlas based on the selection from the radio buttons
create_atlas_btn = widgets.Button(description="Create Atlas from Dill")
modify_atlas_btn = widgets.Button(description="Modify Selected Atlas")
# text box that displays the selected atlas' RT values for the selected compound
atlas_ref_vals = widgets.Text(
description="RT values for compound in Atlas",
value = "RT values for compound in Atlas go here",
color = 'red'
)
# sliders for the user to change the RT values
rtmin_widget = FloatSlider()
rtpeak_widget = FloatSlider()
rtmax_widget = FloatSlider()
# --------------------------------------------------------------------------------------------------------------------
# --------------- F U N C T I O N S ------------- F U N C T I O N S ----------- F U N C T I O N S --------------------
# --------------------------------------------------------------------------------------------------------------------
###########################################################################
###
def mod_atlas_compound_RT_values(**kwargs):
"""
Parameters
----------
kwargs: dictionary that holds atlas (object or name), compound, rt_min, rt_max, and rt_peak
Returns a modified atlas object
-------
"""
atlas = kwargs['atlas']
compound = kwargs['compound']
rt_min = kwargs['rt_min']
rt_max = kwargs['rt_max']
rt_peak = kwargs['rt_peak']
if isinstance(atlas, str):
atlas = metob.retrieve('Atlas', name=atlas, username='*')
num_compounds = len(atlas[0].compound_identifications)
for x in range(num_compounds):
cpd_name = atlas[0].compound_identifications[x].compound[0].name
if compound == cpd_name: # adjust the rt_values
atlas[0].compound_identifications[x].rt_references[0].rt_min = rt_min
atlas[0].compound_identifications[x].rt_references[0].rt_max = rt_max
atlas[0].compound_identifications[x].rt_references[0].rt_peak = rt_peak
break
return atlas
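# Illustrative call sketch (added; the atlas and compound names below are placeholders):
#
#     modified = mod_atlas_compound_RT_values(atlas='myAtlas', compound='glucose',
#                                             rt_min=1.2, rt_max=2.0, rt_peak=1.6)
#     # metob.store(modified) would then persist the adjusted retention times, mirroring
#     # what save_atlas() does further below.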
###########################################################################
###
def atlas_grid(sender):
atlas_dict = dict()
for i in atlas_header:
atlas_dict[i] = list()
wild_card = search_string.value
atlas = metob.retrieve('Atlas', name=wild_card, username='*')
for i, a in enumerate(atlas):
atlas_dict['Atlas Name'].append(a.name)
atlas_dict['No. Compounds'].append(str(len(a.compound_identifications)))
atlas_dict['Last Modified'].append(str(datetime.utcfromtimestamp(a.last_modified)))
grid.df = pd.DataFrame.from_dict(atlas_dict)
grid.width = "100%"
###########################################################################
###
def plot_intensity(cval, fvals, rt_min, rt_max, rt_peak):
global data
for idx, _fs in enumerate(fvals):
# d = data[idx][compound_idx]
d = data[idx][cval]
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)
plt.axvline(rt_min, color='b', linewidth=2.0)
plt.axvline(rt_max, color='g', linewidth=2.0)
plt.axvline(rt_peak, color='r', linewidth=2.0)
###########################################################################
##
def plot_button_clicked(sender):
global data, rtmin_widget, rtmax_widget, rtpeak_widget
plt.cla()
plt.clf()
plt.close()
# get the pkl file name from the selection box
pkl_fname = wfiles.value
# get data and compound names from pickled file
data = ma_data.get_dill_data(pkl_fname)
file_names = ma_data.get_file_names(data)
(compound_names, compound_objects) = ma_data.get_compound_names(data)
# get the name of the compound as selected from the grid
print(grid2.get_selected_rows())
n = grid2.get_selected_rows()[0]
atlas_compound = grid2.df.loc[n]['Compound']
min_x = list()
max_x = list()
# see if selected atlas compound is in the pickle file
if atlas_compound not in compound_names:
print("Compound not found")
return
compound_idx = compound_names.index(atlas_compound)
for idx, _fs in enumerate(file_names):
# d = data[idx][compound_idx]
d = data[idx][0]
rt_min = d['identification'].rt_references[0].rt_min
rt_max = d['identification'].rt_references[0].rt_max
rt_peak = d['identification'].rt_references[0].rt_peak
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
min_x.append(min(x))
max_x.append(max(x))
plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)
plt.axvline(rt_min, color='b', linewidth=2.0)
plt.axvline(rt_max, color='g', linewidth=2.0)
plt.axvline(rt_peak, color='r', linewidth=2.0)
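    # Close the old sliders and rebuild them so their bounds track the retention
    # time range actually observed in the selected compound's EIC data.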
rtmin_widget.close()
rtpeak_widget.close()
rtmax_widget.close()
rtmin_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_min, color='blue')
rtpeak_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_peak, color='red')
rtmax_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_max, color='green')
interact(plot_intensity,
cval=fixed(compound_idx),
fvals=fixed(file_names),
rt_min=rtmin_widget,
rt_peak=rtpeak_widget,
rt_max=rtmax_widget)
###########################################################################
##
def display_atlases():
search_string.width = '100%'
atlases_select.width = '100%'
hbox = widgets.HBox((search_string, search_button))
display(hbox)
search_button.on_click(atlas_grid)
grid.on_msg(my_handler)
display(grid)
display(grid2)
###########################################################################
###
def display_pkl_files_and_plot_data(pkl_path='$HOME', filter_str='', extension='*.pkl'):
import subprocess
import fnmatch
# if location == '$HOME':
# pkl_path = os.path.expandvars(location)
# else:
# pkl_path = os.path.expandvars(os.path.join('$HOME', location))
# print pkl_path
# pkl_file_list = subprocess.Popen(["find", pkl_path,"-iname",filter_str, "-iname", extension],
# stdout=subprocess.PIPE).communicate()[0]
#
# pkl_file_list = pkl_file_list.split('\n')
pkl_file_list = []
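    # Recursively collect files matching `extension` under pkl_path; if filter_str
    # is given, keep only files whose basename contains it (case-insensitive).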
for root, dirnames, filenames in os.walk(pkl_path):
for filename in fnmatch.filter(filenames, extension):
fname = os.path.join(root, filename)
if filter_str:
if filter_str.lower() in os.path.basename(fname).lower():
pkl_file_list.append(fname)
else:
pkl_file_list.append(fname)
wfiles.options = pkl_file_list
display(widgets.HBox((plot_button, save_atlas_button, save_as_button, save_atlas_as_txt)))
display(wfiles)
wfiles.width = "100%"
plot_button.on_click(plot_button_clicked)
save_atlas_button.on_click(save_atlas)
save_as_button.on_click(save_atlas_as)
###########################################################################
###
def save_atlas(sender):
n = grid.get_selected_rows()
m = grid2.get_selected_rows()
if len(m) == 0 or len(n) == 0:
ok_to_save_atlas = False
else:
ok_to_save_atlas = True
m = m[0]
n = n[0]
if ok_to_save_atlas:
kwargs = dict()
        kwargs['atlas'] = grid.df.loc[n]['Atlas Name']
kwargs['compound'] = grid2.df.loc[m]['Compound']
kwargs['rt_min'] = rtmin_widget.value
kwargs['rt_max'] = rtmax_widget.value
kwargs['rt_peak'] = rtpeak_widget.value
atlas = mod_atlas_compound_RT_values(**kwargs)
metob.store(atlas)
else:
print("cannot save atlas")
###########################################################################
###
def save_atlas_as(sender):
n = grid.get_selected_rows()
m = grid2.get_selected_rows()
if len(m) == 0 or len(n) == 0 or len(save_atlas_as_txt.value) == 0:
ok_to_save_atlas = False
else:
ok_to_save_atlas = True
m = m[0]
n = n[0]
if ok_to_save_atlas:
kwargs = dict()
        kwargs['atlas'] = grid.df.loc[n]['Atlas Name']
kwargs['compound'] = grid2.df.loc[m]['Compound']
kwargs['rt_min'] = rtmin_widget.value
kwargs['rt_max'] = rtmax_widget.value
kwargs['rt_peak'] = rtpeak_widget.value
atlas = mod_atlas_compound_RT_values(**kwargs)
if len(save_atlas_as_txt.value) > 1:
atlas.name = save_atlas_as_txt.value
metob.store(atlas)
else:
print("cannot save atlas")
###########################################################################
###
def my_handler(widget, content, buffers=None):
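    # qgrid selection handler: when an atlas row is selected in the first grid,
    # retrieve that atlas and fill the second grid with its compounds and their
    # RT and m/z reference values.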
if content['type'] == 'selection_change':
row = content['rows'][0]
# get the compounds in that atlas and display their content
atlas_name = grid.df['Atlas Name'][row]
atlas = metob.retrieve('Atlas', name=atlas_name, username="*")
compound_vals_dict = dict()
for i in compound_header:
compound_vals_dict[i] = list()
for x in atlas[0].compound_identifications:
if x.compound:
compound_vals_dict['Compound'].append(str(x.compound[0].name))
else:
compound_vals_dict['Compound'].append(str(x.name))
compound_vals_dict['rt_max'].append(str(x.rt_references[0].rt_max))
compound_vals_dict['rt_min'].append(str(x.rt_references[0].rt_min))
compound_vals_dict['rt_peak'].append(str(x.rt_references[0].rt_peak))
compound_vals_dict['rt_units'].append(str(x.rt_references[0].rt_units))
compound_vals_dict['mz'].append(str(x.mz_references[0].mz))
compound_vals_dict['mz_tolerance'].append(str(x.mz_references[0].mz_tolerance))
compound_vals_dict['mz_tolerance_units'].append(str(x.mz_references[0].mz_tolerance_units))
compound_vals_dict['lcms_run'].append(str(x.rt_references[0].lcms_run))
grid2.df = pd.DataFrame.from_dict(compound_vals_dict)
grid2.width = '100%'
| bsd-3-clause |
winklerand/pandas | pandas/tests/indexing/common.py | 1 | 9623 | """ common utilities """
import itertools
from warnings import catch_warnings
import numpy as np
from pandas.compat import lrange
from pandas.core.dtypes.common import is_scalar
from pandas import Series, DataFrame, Panel, date_range, UInt64Index
from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
_verbose = False
def _mklbl(prefix, n):
return ["%s%s" % (prefix, i) for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
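# e.g. for a DataFrame (ndim == 2), _axify(df, [0, 1], 1) returns
# (slice(None), [0, 1]), i.e. "all rows, columns 0 and 1"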
class Base(object):
""" indexing comprehensive base class """
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setup_method(self, method):
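        # Build Series/DataFrame/Panel fixtures for the index flavours in _typs;
        # combinations that are not created here resolve to None when the
        # per-object dicts are assembled at the end of this method.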
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
with catch_warnings(record=True):
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
with catch_warnings(record=True):
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
with catch_warnings(record=True):
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
with catch_warnings(record=True):
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
with catch_warnings(record=True):
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
with catch_warnings(record=True):
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
with catch_warnings(record=True):
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def generate_indices(self, f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def get_result(self, obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
        # use an artificial conversion to map the key as integers to the labels
        # so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
with catch_warnings(record=True):
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def get_value(self, f, i, values=False):
""" return the value for the location i """
        # check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def check_values(self, f, func, values=False):
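        # Walk every coordinate of f and check that getattr(f, func)[i] matches
        # f.values[i] (values=True) or the result of successive label lookups.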
if f is None:
return
axes = f.axes
        indices = itertools.product(*axes)
        for i in indices:
result = getattr(f, func)[i]
            # check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
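        # Compare indexing via (method1, key1) against (method2, key2) for every
        # requested object type, index flavour and axis; `fails` marks expected
        # failures (True, or an exception type that is tolerated).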
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = self.get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
assert rs == xp
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is None:
continue
def _call(obj=obj):
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
# Panel deprecations
if isinstance(obj, Panel):
with catch_warnings(record=True):
_call()
else:
_call()
| bsd-3-clause |
yonglehou/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
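    # shrunk_covariance blends the empirical covariance with a scaled identity:
    # (1 - shrinkage) * emp_cov + shrinkage * mu * I, with mu = trace(emp_cov) / n_features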
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
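    # Ledoit-Wolf picks the shrinkage coefficient via a closed-form formula that
    # asymptotically minimises the mean squared error between the shrunk and the
    # true covariance matrix.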
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
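    # OAS (Oracle Approximating Shrinkage) uses an alternative closed-form
    # shrinkage coefficient derived under the assumption of Gaussian samples.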
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |