repo_name | path | copies | size | content | license
---|---|---|---|---|---
Myasuka/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so cannot use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
        clf = clf.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these differ from what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
drericstrong/tinsul | tinsul/core.py | 1 | 6979 | # -*- coding: utf-8 -*-
"""
tinsul.core
~~~~~~~~~~~
A module to simulate condition-monitoring data for Transformer INSULation
prognostics models.
:copyright: (c) 2017 by Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import numpy as np
import pandas as pd
from numba import jit
from numpy.random import randn
@jit
# Transformer INSULation SIMulator
def tinsul_sim(temps, start_month=1.0, dp_initial=1000, fail_loc=175.0,
fail_scale=10.0, o=1.0, t0=35.0, tc=30.0, n=1.0, n0=0.8, nc=1.0,
l=1.0, a=(3.7*10**7)):
"""
tinsul_sim is a function to simulate condition-monitoring data for
Transformer INSULation prognostics models.
:param temps: 12x3 array, containing monthly low, average, and high temps
supplying "default" will use the temperatures for DC
:param start_month: first week of January=1, second week=1.25, etc.
:param dp_initial: starting degree of polymerization (DP) of the paper
:param fail_loc: "location" parameter of the logistic failure distribution
:param fail_scale: "scale" parameter of the logistic failure distribution
:param o: overload_ratio, typically between 0.75 and 1.1 [Montsinger]
if a list is supplied, simulation will iterate through the list
as if it were a repeating pattern
:param t0: temperature rise of oil over ambient [Montsinger]
:param tc: temperature rise of the windings over the oil [Montsinger]
:param n: ratio of copper to iron loss at rated load [Montsinger]
:param n0: 0.8 for self-cooled transformers, 0.5 water-cooled [Montsinger]
:param nc: 1.0 for vertical windings, 0.8 for horizontal [Montsinger]
:param l: if hot spot is constant, l=1.0 [Montsinger]
:param a: a constant based on the type of paper insulation [Emsley]
:return: A DataFrame of condition indicators per week: co, co2, furan,
furfural, and paper water content (in order)
"""
if type(o) is float:
o_list = [o]
else:
o_list = o
if temps == "default":
temps = _use_default_temps()
# Stores the accumulated condition indicator values in a dict
acc = {}
# Variables to store some "state" values per iteration
cur_month = start_month
cur_dp = dp_initial
cur_week = 0
cur_o = 0
    # fail_dp determines the DP at which the paper insulation will fail,
    # drawn from a logistic distribution centered at fail_loc (175 by default)
fail_dp = np.random.logistic(loc=fail_loc, scale=fail_scale)
while cur_week < 5000: # transformers don't live longer than 5000 weeks
# Simulate 6 hours at low, 12 hours at avg, and 6 hours at high temps
# The first index is temp, the second index is hours at that temp
ambient_low = [temps[int(cur_month)-1][0], 6]
ambient_avg = [temps[int(cur_month)-1][1], 12]
ambient_high = [temps[int(cur_month)-1][2], 6]
# Update DP based on the heat stresses from the core hot spot
for ambient, time in [ambient_low, ambient_avg, ambient_high]:
chs = _core_hot_spot(ambient, o_list[cur_o], t0, tc, n, n0, nc, l)
cur_dp = _calculate_dp(chs, time, cur_dp, a)
# Calculate the condition indicators based on DP
acc[cur_week] = _oil_contamination(cur_dp)
# Check for transformer failure (less than 150 DP is instant failure)
if (cur_dp <= fail_dp) | (cur_dp < 150):
break
        # Advance time by one week, i.e. 12/52 (~0.2308) of a month
cur_month += 0.230769
cur_week += 1
cur_o += 1
# Rollover to the next year, if necessary
if cur_month >= 13.0:
cur_month = 1
if cur_o > (len(o_list) - 1):
cur_o = 0
# Convert the dict to a pandas DataFrame
df = pd.DataFrame.from_dict(acc, orient='index')
df.columns = ['CO', 'CO2', 'Furan', 'Furfural', 'Water Content']
return df
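# Usage sketch (illustrative, not part of the original module): with the
# built-in Washington, DC temperature table the simulator can be driven as
#
#     df = tinsul_sim("default", start_month=1, dp_initial=1000)
#     print(df.tail())  # weekly CO, CO2, furan, furfural and water content
#
# The number of rows equals the number of weeks survived before the sampled
# failure DP is reached (capped at 5000 weeks).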
# Uses the monthly temperatures from Washington, DC
def _use_default_temps():
return [[-2, 1, 6], # January
[-1, 3, 8], # February
[3, 7, 13], # March
[8, 13, 19], # April
[14, 18, 24], # May
[19, 24, 29], # June
[22, 28, 31], # July
[21, 27, 30], # August
[17, 22, 26], # September
[10, 15, 20], # October
[5, 10, 14], # November
[0, 4, 8]] # December
@jit
# Calculates the core hot spot temperature of the transformer
def _core_hot_spot(amb, o=1.0, t0=35.0, tc=30.0, n=1.0, n0=0.8, nc=1.0, l=1.0):
# ---Refer to Montsinger's paper for more information---
# amb is ambient temperature in Celsius
# o is overload ratio in decimal form (e.g. 0.75 not 75%)
# t0 is the temperature rise of oil over ambient
# tc is the temperature rise of the windings over the oil
# n = ratio of copper to iron loss at rated load (1 for simplification)
# n0 = 0.8 for self-cooled transformers, 0.5 for water-cooled
# nc = 1.0 for vertical windings, 0.8 for horizontal windings
# l = hot spot is constant, kva varies with the ambient -> l=1.0
t_chs = t0*((n*o**2+1)**n0) + tc*l*o**(2*nc) + amb
return t_chs
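# Worked spot check (illustrative numbers only): with the defaults o=1, t0=35,
# tc=30, n=1, n0=0.8, nc=1, l=1 and an ambient of 25 C,
#     t_chs = 35*(2**0.8) + 30 + 25 ~= 60.9 + 30 + 25 ~= 116 C,
# i.e. roughly a 91 C rise of the core hot spot over ambient at rated load.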
@jit
def _calculate_dp(core_hot_spot, time, dp_initial, a=(3.7*10**7)):
# ---Refer to Emsley's paper for more information---
# core_hot_spot is from the _core_hot_spot function (in Celsius)
# time is measured in hours
# dp_initial is the initial degree of polymerization
# a is a constant based on the type of paper insulation:
# -upgraded paper: 3.7*10**7
# -dry Kraft paper: 1.1*10**8
# -Kraft paper + 1% water: 3.5*10**8
# -Kraft paper + 2% water: 7.8*10**8
# -Kraft paper + 4% water: 3.5*10**9
k = a * np.exp(-(117000 / (8.314 * (core_hot_spot + 273.15))))
dp_final = 1 / ((k * 24 * 7 * time) + (1 / dp_initial))
return dp_final
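# Note (added, not from the original module): because k follows an Arrhenius
# law with an activation energy of ~117 kJ/mol, the degradation rate roughly
# doubles for every 6-8 C rise in hot-spot temperature near normal operating
# temperatures, which is why the overload ratio o dominates paper life.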
@jit
def _oil_contamination(dp):
# dp is the degree of polymerization
# ------------------------------------------------------------------------
# This section contains estimates of dissolved gases and other features
# based on regression of empirical data in academic papers
# ---Refer to Pradhan and Ramu's paper for more information---
# CO and CO2 are the TOTAL accumulation of the gas, not the rate
co = (-0.0028*dp + 6.28) + (0.13*randn())
co2 = (-0.0026*dp + 8.08) + (0.66*randn())
# ---Refer to "On the Estimation of Elapsed Life" for more information---
furan = (-0.0038*dp + 7.93) + (0.13*randn())
# ---Refer to "Study on the Aging Characteristics and Bond-Breaking
# Process of Oil - Paper Insulation" for more information---
furfural = (-0.0025*dp + 4.72) + (0.11*randn())
# ---Refer to Emsley's paper for more information---
water_content = (0.5 * np.log(1000 / dp)) / (np.log(2)) + (0.03*randn())
return co, co2, furan, furfural, water_content
| agpl-3.0 |
TimBizeps/BachelorAP | V206_Wärmepumpe/auswertung3.py | 1 | 1126 | import matplotlib as mpl
mpl.use('pgf')
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
v, x, n, o = np.genfromtxt('daten2.txt', unpack=True)
n = n + 273.15
o = o + 273.15
p0 = 1.013
x = x/p0
def R(o, h, i):
return h * o + i
params3, covariance3 = curve_fit(R, 1/o, np.log(x))
errors3 = np.sqrt(np.diag(covariance3))
print('a =', params3[0], '±', errors3[0])
print('b =', params3[1], '±', errors3[1])
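# Note (added, not part of the original script): this is a Clausius-Clapeyron
# style fit of ln(p/p0) against 1/T, so the fitted slope a corresponds to -L/R
# and the latent heat can be estimated as L = -a * 8.314 J/(mol K).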
x_plot = np.linspace(0.00335, 0.0037)
plt.plot(1/o, np.log(x), 'rx', label="Messwerte")
plt.plot(x_plot, R(x_plot, *params3), 'b-', label='Ausgleichsgerade', linewidth=1)
plt.legend(loc="best")
plt.xlabel(r'$\frac{1}{T} \,\,/\,\, \frac{1}{K}$')
plt.ylabel(r'$ln(\frac{p}{p_0})$')
plt.tight_layout()
plt.savefig("Plot3.pdf")
| gpl-3.0 |
jfinkels/networkx | examples/graph/atlas.py | 4 | 2761 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# list of graphs of all connected components
C = nx.connected_component_subgraphs(U)
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
if __name__ == '__main__':
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot")
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = nx.connected_component_subgraphs(G)
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png", dpi=75)
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 46 | 3387 | import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.testing import assert_equal, assert_array_equal
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3),
)
assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3),
)
| bsd-3-clause |
WaveBlocks/libwaveblocks | scripts/plotHarmonic_2D.py | 2 | 2853 | from numpy import *
from numpy.linalg import det
from matplotlib.pyplot import *
from matplotlib import gridspec
import h5py as hdf
import sys
sys.path.insert(0, 'IMPORTANT/WaveBlocksND/src/WaveBlocksND')
from Plot import stemcf
F = hdf.File("harmonic_2D.hdf5", "r")
dt = 0.01
T = 12
time = linspace(0, T, T/dt+1)
f = lambda x: ('%f' % x).rstrip('0').rstrip('.')
def read_data(base, time):
L = [F[base + "@" + f(t)][:] for t in time]
return array(L)
q = read_data("q", time)
p = read_data("p", time)
Q = read_data("Q", time)
P = read_data("P", time)
EPot = read_data("Epot",time)
EKin = read_data("Ekin",time)
Etot = EPot + EKin
figure()
Etot = squeeze(Etot);
semilogy(time, abs(Etot - Etot[0]), 'm-', label='$|E_{tot}(0) - E_{tot}(t)|$')
xlabel('time')
legend(loc="upper left", bbox_to_anchor=(1,1))
grid(True)
fig = figure()
gs = gridspec.GridSpec(2,2)
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(q[:,0,:],q[:,1,:], 'b-', label = '$(q_x,q_y)$')
legend()
grid(True)
ax2 = fig.add_subplot(gs[0,1])
ax2.plot(p[:,0,:],p[:,1,:], 'r-', label= '$(p_x,p_y)$')
legend()
grid(True)
# In[13]:
detQ = det(Q)
detP = det(P)
# In[14]:
fig.add_subplot(gs[1,0])
plot(time, abs(detQ), 'b-', label= '$abs(det(Q))$')
xlabel('time')
legend()
grid(True)
fig.add_subplot(gs[1,1])
plot(time, abs(detP), 'r-', label = '$abs(det(P))$')
xlabel('time')
legend()
grid(True)
# In[15]:
Ekin = read_data("Ekin", time)
Epot = read_data("Epot", time)
Etot = read_data("Etot", time)
# In[16]:
Ekin.shape
# In[17]:
figure()
plot(time, squeeze(Ekin), 'r-', label='$E_{kin}$')
plot(time, squeeze(Epot), 'b-', label='$E_{pot}$' )
plot(time, squeeze(Etot), 'm-', label='$E_{tot}$')
xlabel('time')
legend(loc="upper left", bbox_to_anchor=(1,1))
grid(True)
# In[18]:
C = read_data("coefficients", time)
# In[19]:
C.shape
#~ # In[28]:
K = len(C[0,:,0]);
figure()
subplot(2,3,1)
title('$t=0$')
c = C[0, :,0];
stemcf(arange(K), angle(c), abs(c))
grid(True)
xlim(-1, K + 1)
ylim(-0.1, 1.1)
# In[29]:
subplot(2,3,2)
title('$t=4$')
c = C[400, :,0];
stemcf(arange(K), angle(c), abs(c))
grid(True)
xlim(-1, K + 1)
ylim(-0.1, 1.1)
# In[30]:
subplot(2,3,3)
title('$t=6$')
c = C[600, :,0];
stemcf(arange(K), angle(c), abs(c))
grid(True)
xlim(-1, K + 1)
ylim(-0.1, 1.1)
# In[31]:
subplot(2,3,4)
title('$t=8$')
c = C[800, :,0];
stemcf(arange(K), angle(c), abs(c))
grid(True)
xlim(-1, K + 1)
ylim(-0.1, 1.1)
# In[27]:
subplot(2,3,5)
title('$t=10$')
c = C[1000, :,0];
stemcf(arange(K), angle(c), abs(c))
grid(True)
xlim(-1, K + 1)
ylim(-0.1, 1.1)
subplot(2,3,6)
title('$t=12$')
c = C[1200, :,0];
stemcf(arange(K), angle(c), abs(c))
grid(True)
xlim(-1, K + 1)
ylim(-0.1, 1.1)
show()
| gpl-2.0 |
EnergieID/entsoe-py | tests.py | 1 | 4305 | import unittest
import pandas as pd
from bs4 import BeautifulSoup
from entsoe import EntsoeRawClient, EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
from settings import *
class EntsoeRawClientTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = EntsoeRawClient(api_key=api_key)
cls.start = pd.Timestamp('20180101', tz='Europe/Brussels')
cls.end = pd.Timestamp('20180107', tz='Europe/Brussels')
cls.country_code = 'BE'
def test_datetime_to_str(self):
start_str = self.client._datetime_to_str(dtm=self.start)
self.assertIsInstance(start_str, str)
self.assertEqual(start_str, '201712312300')
def test_basic_queries(self):
queries = [
self.client.query_day_ahead_prices,
self.client.query_load,
self.client.query_wind_and_solar_forecast,
self.client.query_load_forecast,
self.client.query_generation,
self.client.query_generation_forecast,
self.client.query_installed_generation_capacity,
# this one gives back a zip so disabled for testing right now
#self.client.query_imbalance_prices,
self.client.query_net_position_dayahead
]
for query in queries:
text = query(country_code=self.country_code, start=self.start,
end=self.end)
self.assertIsInstance(text, str)
try:
BeautifulSoup(text, 'html.parser')
except Exception as e:
self.fail(f'Parsing of response failed with exception: {e}')
def query_crossborder_flows(self):
text = self.client.query_crossborder_flows(
country_code_from='BE', country_code_to='NL', start=self.start,
end=self.end)
self.assertIsInstance(text, str)
try:
BeautifulSoup(text, 'html.parser')
except Exception as e:
self.fail(f'Parsing of response failed with exception: {e}')
def test_query_unavailability_of_generation_units(self):
text = self.client.query_unavailability_of_generation_units(
country_code='BE', start=self.start,
end=self.end)
self.assertIsInstance(text, bytes)
def test_query_withdrawn_unavailability_of_generation_units(self):
with self.assertRaises(NoMatchingDataError):
self.client.query_withdrawn_unavailability_of_generation_units(
country_code='BE', start=self.start, end=self.end)
class EntsoePandasClientTest(EntsoeRawClientTest):
@classmethod
def setUpClass(cls):
cls.client = EntsoePandasClient(api_key=api_key)
cls.start = pd.Timestamp('20180101', tz='Europe/Brussels')
cls.end = pd.Timestamp('20180107', tz='Europe/Brussels')
cls.country_code = 'BE'
def test_basic_queries(self):
pass
def test_basic_series(self):
queries = [
self.client.query_day_ahead_prices,
self.client.query_load,
self.client.query_load_forecast,
self.client.query_generation_forecast,
self.client.query_net_position_dayahead
]
for query in queries:
ts = query(country_code=self.country_code, start=self.start,
end=self.end)
self.assertIsInstance(ts, pd.Series)
def query_crossborder_flows(self):
ts = self.client.query_crossborder_flows(
country_code_from='BE', country_code_to='NL', start=self.start,
end=self.end)
self.assertIsInstance(ts, pd.Series)
def test_basic_dataframes(self):
queries = [
self.client.query_wind_and_solar_forecast,
self.client.query_generation,
self.client.query_installed_generation_capacity,
self.client.query_imbalance_prices,
self.client.query_unavailability_of_generation_units
]
for query in queries:
ts = query(country_code=self.country_code, start=self.start,
end=self.end)
self.assertIsInstance(ts, pd.DataFrame)
def test_query_unavailability_of_generation_units(self):
pass
if __name__ == '__main__':
unittest.main() | mit |
masasin/latexipy | docs/conf.py | 1 | 8725 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# latexipy documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import latexipy
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode']
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = True
napoleon_include_special_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LaTeXiPy'
copyright = u"2017, Jean Nassar"
author = u'Jean Nassar'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = latexipy.__version__
# The full version, including alpha/beta/rc tags.
release = latexipy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'latexipydoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'latexipy.tex',
u'LaTeXiPy Documentation',
u'Jean Nassar', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'latexipy',
u'LaTeXiPy Documentation',
[u'Jean Nassar'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'latexipy',
u'LaTeXiPy Documentation',
u'Jean Nassar',
'latexipy',
     'Generate beautiful plots for LaTeX using your existing matplotlib-based code.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Suppress warnings.
suppress_warnings = ['image.nonlocal_uri']
| mit |
lazywei/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
nsalomonis/AltAnalyze | stats_scripts/MutationEnrichment_adj.py | 1 | 10611 | #!/usr/bin/env python
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import numpy as np
import pylab as pl
import os.path
from collections import defaultdict
from sklearn.cluster import KMeans
try:from stats_scripts import statistics
except Exception: import statistics
import random
import UI
import export; reload(export)
import re
from stats_scripts import fishers_exact_test
import traceback
import warnings
import math
import export
def FishersExactTest(r,n,R,N):
z=0.0
"""
    N is the total number of genes measured (Ensembl linked from denom) (number of exons evaluated)
R is the total number of genes meeting the criterion (Ensembl linked from input) (number of exonic/intronic regions overlaping with any CLIP peeks)
n is the total number of genes in this specific MAPP (Ensembl denom in MAPP) (number of exonic/intronic regions associated with the SF)
r is the number of genes meeting the criterion in this MAPP (Ensembl input in MAPP) (number of exonic/intronic regions with peeks overlapping with the SF)
With these values, we must create a 2x2 contingency table for a Fisher's Exact Test
that reports:
+---+---+ a is the # of IDs in the term regulated
| a | b | b is the # of IDs in the term not-regulated
+---+---+ c is the # of IDs not-in-term and regulated
| c | d | d is the # of IDs not-in-term and not-regulated
+---+---+
    If we know r=20, R=85, n=437 and N=14480
+----+-----+
| 20 | 417 | 437
+----+-----+
| 65 |13978| 14043
+----+-----+
85 14395 14480
"""
if (R-N) == 0: return 0
elif r==0 and n == 0: return 0
else:
try:
#try:
z = (r - n*(R/N))/math.sqrt(n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1))))
#except ZeroDivisionError: print 'r,_n,R,N: ', r,_n,R,N;kill
except Exception: print (r - n*(R/N)), n*(R/N),(1-(R/N)),(1-((n-1)/(N-1))),r,n,N,R;kill
a = r; b = n-r; c=R-r; d=N-R-b
table = [[int(a),int(b)], [int(c),int(d)]]
"""
print a,b; print c,d
import fishers_exact_test; table = [[a,b], [c,d]]
ft = fishers_exact_test.FishersExactTest(table)
print ft.probability_of_table(table); print ft.two_tail_p()
print ft.right_tail_p(); print ft.left_tail_p()
"""
        try: ### Scipy version - cuts down runtime by ~1/3rd the time
            from scipy import stats  # imported here so the fallback below still works without scipy
            with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
oddsratio, pvalue = stats.fisher_exact(table)
# print pvalue
return pvalue,z
except Exception:
ft = fishers_exact_test.FishersExactTest(table)
# print ft.two_tail_p()
return ft.two_tail_p(),z
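# Hedged cross-check (assumes scipy is installed; not part of the original
# module): the 2x2 table from the docstring above can be tested directly, e.g.
#
#     from scipy import stats
#     oddsratio, pvalue = stats.fisher_exact([[20, 417], [65, 13978]])
#
# which returns a two-tailed p-value far below any usual significance cutoff,
# as expected when 20 of 437 pathway genes are hits against an 85/14480
# background rate.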
def header_file(fname, delimiter=None,Expand="no"):
head=0
header=[]
newheader=[]
with open(fname, 'rU') as fin:
for line in fin:
#print line
line = line.rstrip(os.linesep)
header=string.split(line,'\t')
if Expand=="yes":
if head==0:
for i in range(1,len(header)):
iq=header[i]
#iq=string.split(header[i],".")[0]
newheader.append(iq)
head=1
else:break
else:
if len(header)<3 or Expand=="no":
if header[0] not in newheader:
newheader.append(header[0])
#print len(newheader)
return newheader
def Enrichment(Inputfile,mutdict,mutfile,Expand,header):
import collections
import mappfinder
X=defaultdict(list)
prev=""
head=0
group=defaultdict(list)
enrichdict=defaultdict(float)
mut=export.findFilename(mutfile)
dire=export.findParentDir(Inputfile)
output_dir = dire+'MutationEnrichment'
export.createExportFolder(output_dir)
exportnam=output_dir+'/Enrichment_Results.txt'
export_enrich=open(exportnam,"w")
exportnam=output_dir+'/Enrichment_tophits.txt'
export_hit=open(exportnam,"w")
export_enrich.write("Mutations"+"\t"+"Cluster"+"\t"+"r"+"\t"+"R"+"\t"+"n"+"\t"+"Sensitivity"+"\t"+"Specificity"+"\t"+"z-score"+"\t"+"Fisher exact test"+"\t"+"adjp value"+"\n")
if Expand=="yes":
header2=header_file(Inputfile,Expand="yes")
for line in open(Inputfile,'rU').xreadlines():
if head >0:
line=line.rstrip('\r\n')
q= string.split(line,'\t')
for i in range(1,len(q)):
if q[i]==str(1):
#group[q[0]].append(header2[i-1])
group[header2[i-1]].append(q[0])
else:
head+=1
continue
else:
for line in open(Inputfile,'rU').xreadlines():
line=line.rstrip('\r\n')
line=string.split(line,'\t')
#for i in range(1,len(line)):
group[line[2]].append(line[0])
total_Scores={}
for kiy in mutdict:
if kiy =="MDP":
print mutdict[kiy]
groupdict={}
remaining=[]
remaining=list(set(header) - set(mutdict[kiy]))
groupdict[1]=mutdict[kiy]
groupdict[2]=remaining
# export_enrich1.write(kiy)
for key2 in group:
r=float(len(list(set(group[key2])))-len(list(set(group[key2]) - set(mutdict[kiy]))))
n=float(len(group[key2]))
R=float(len(set(mutdict[kiy])))
N=float(len(header))
if r==0 or R==1.0:
print kiy,key2,r,n,R,N
pval=float(1)
z=float(0)
null_z = 0.000
zsd = mappfinder.ZScoreData(key2,r,R,z,null_z,n)
zsd.SetP(pval)
else:
try: z = Zscore(r,n,N,R)
except : z = 0.0000
### Calculate a Z-score assuming zero matching entries
try: null_z = Zscore(0,n,N,R)
except Exception: null_z = 0.000
try:
pval = mappfinder.FishersExactTest(r,n,R,N)
zsd = mappfinder.ZScoreData(key2,r,R,z,null_z,n)
zsd.SetP(pval)
except Exception:
pval=1.0
zsd = mappfinder.ZScoreData(key2,r,R,z,null_z,n)
zsd.SetP(pval)
#pass
if kiy in total_Scores:
signature_db = total_Scores[kiy]
signature_db[key2]=zsd ### Necessary format for the permutation function
else:
signature_db={key2:zsd}
total_Scores[kiy] = signature_db
sorted_results=[]
mutlabels={}
for kiy in total_Scores:
signature_db = total_Scores[kiy]
### Updates the adjusted p-value instances
mappfinder.adjustPermuteStats(signature_db)
for signature in signature_db:
zsd = signature_db[signature]
results = [kiy,signature,zsd.Changed(),zsd.Measured(),zsd.InPathway(),str(float(zsd.PercentChanged())/100.0),str(float(float(zsd.Changed())/float(zsd.InPathway()))), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP()] #string.join(zsd.AssociatedIDs(),'|')
sorted_results.append([signature,float(zsd.PermuteP()),results])
sorted_results.sort() ### Sort by p-value
prev=""
for (sig,p,values) in sorted_results:
if sig!=prev:
flag=True
export_hit.write(string.join(values,'\t')+'\n')
if flag:
if (float(values[5])>=0.5 and float(values[6])>=0.5) or float(values[5])>=0.6 :
mutlabels[values[1]]=values[0]
flag=False
export_hit.write(string.join(values,'\t')+'\n')
export_enrich.write(string.join(values,'\t')+'\n')
prev=sig
if len(sorted_results)==0:
        export_enrich.write(string.join([mut,'NONE','NONE','NONE','NONE','NONE','NONE'],'\t')+'\n')
export_enrich.close()
#print mutlabels
return mutlabels
def findsiggenepermut(mutfile):
samplelist=[]
mutdict=defaultdict(list)
head=0
#File with all the sample names
for exp1 in open(mutfile,"rU").xreadlines():
#print exp1
lin=exp1.rstrip('\r\n')
lin=string.split(lin,"\t")
if len(lin)>3:
if head==0:
for i in lin[1:]:
samplelist.append(i)
head=1
continue
else:
for j in range(1,len(lin)):
if lin[j]==str(1):
mutdict[lin[0]].append(samplelist[j-1])
else:
mutdict[lin[2]].append(lin[0])
return mutdict
def Zscore(r,n,N,R):
"""where N is the total number of events measured:
R is the total number of events meeting the criterion:
n is the total number of events in this specific reference gene-set:
r is the number of events meeting the criterion in the examined reference gene-set: """
N=float(N) ### This bring all other values into float space
z = (r - n*(R/N))/math.sqrt(n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1))))
return z
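# Illustrative check (numbers chosen to match the Fisher's-test docstring
# above, not taken from any dataset): Zscore(r=20, n=437, N=14480, R=85)
# evaluates to roughly 11, i.e. far more overlap than expected by chance
# (the expected count is n*R/N ~= 2.6).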
if __name__ == '__main__':
import getopt
mutdict=defaultdict(list)
################ Comand-line arguments ################
if len(sys.argv[1:])<=1: ### Indicates that there are insufficient number of command-line arguments
print "Warning! Insufficient command line flags supplied."
sys.exit()
else:
analysisType = []
options, remainder = getopt.getopt(sys.argv[1:],'', ['Inputfile=','Reference=','Expand='])
for opt, arg in options:
if opt == '--Inputfile': Inputfile=arg
elif opt == '--Reference':Reference=arg
elif opt =='--Expand': Expand=arg
else:
print "Warning! Command-line argument: %s not recognized. Exiting..." % opt; sys.exit()
mutfile=Reference
header=header_file(mutfile)
mutdict=findsiggenepermut(mutfile)
Enrichment(Inputfile,mutdict,mutfile,Expand,header)
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/legend_auto.py | 7 | 2267 | """
This file was written to test matplotlib's autolegend placement
algorithm, but shows lots of different ways to create legends so is
useful as a general examples
Thanks to John Gill and Phil ?? for help at the matplotlib sprint at
pycon 2005 where the auto-legend support was written.
"""
from pylab import *
import sys
rcParams['legend.loc'] = 'best'
N = 100
x = arange(N)
def fig_1():
figure(1)
t = arange(0, 40.0 * pi, 0.1)
l, = plot(t, 100*sin(t), 'r', label='sine')
legend()
def fig_2():
figure(2)
plot(x, 'o', label='x=y')
legend()
def fig_3():
figure(3)
plot(x, -x, 'o', label='x= -y')
legend()
def fig_4():
figure(4)
plot(x, ones(len(x)), 'o', label='y=1')
plot(x, -ones(len(x)), 'o', label='y=-1')
legend()
def fig_5():
figure(5)
n, bins, patches = hist(randn(1000), 40, normed=1)
l, = plot(bins, normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3)
legend([l, patches[0]], ['fit', 'hist'])
def fig_6():
figure(6)
plot(x, 50-x, 'o', label='y=1')
plot(x, x-50, 'o', label='y=-1')
legend()
def fig_7():
figure(7)
xx = x - (N/2.0)
plot(xx, (xx*xx)-1225, 'bo', label='$y=x^2$')
plot(xx, 25*xx, 'go', label='$y=25x$')
plot(xx, -25*xx, 'mo', label='$y=-25x$')
legend()
def fig_8():
figure(8)
b1 = bar(x, x, color='m')
b2 = bar(x, x[::-1], color='g')
legend([b1[0], b2[0]], ['up', 'down'])
def fig_9():
figure(9)
b1 = bar(x, -x)
b2 = bar(x, -x[::-1], color='r')
legend([b1[0], b2[0]], ['down', 'up'])
def fig_10():
figure(10)
b1 = bar(x, x, bottom=-100, color='m')
b2 = bar(x, x[::-1], bottom=-100, color='g')
b3 = bar(x, -x, bottom=100)
b4 = bar(x, -x[::-1], bottom=100, color='r')
legend([b1[0], b2[0], b3[0], b4[0]], ['bottom right', 'bottom left',
'top left', 'top right'])
if __name__ == '__main__':
nfigs = 10
figures = []
for f in sys.argv[1:]:
try:
figures.append(int(f))
except ValueError:
pass
if len(figures) == 0:
figures = range(1, nfigs+1)
for fig in figures:
fn_name = "fig_%d" % fig
fn = globals()[fn_name]
fn()
show()
| mit |
MJuddBooth/pandas | pandas/tests/test_sorting.py | 1 | 17450 | from collections import defaultdict
from datetime import datetime
from itertools import product
import warnings
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY2
from pandas import DataFrame, MultiIndex, Series, compat, concat, merge
from pandas.core import common as com
from pandas.core.sorting import (
decons_group_index, get_group_index, is_int64_overflow_possible,
lexsort_indexer, nargsort, safe_sort)
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestSorting(object):
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame({'A': A,
'B': B,
'C': A,
'D': B,
'E': A,
'F': B,
'G': A,
'H': B,
'values': np.random.randn(2500)})
lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A'])
left = lg.sum()['values']
right = rg.sum()['values']
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()['values']
for k, v in compat.iteritems(expected):
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict(
{'a': values, 'b': values, 'c': values, 'd': values})
grouped = data.groupby(['a', 'b', 'c', 'd'])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i]))  # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list('abcde'))
df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list('abcde'))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde'))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype='f8')
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=['jim', 'joe'], index=mi)
return res.sort_index()
assert_frame_equal(gr.mean(), aggr(np.mean))
assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[nan] * 5 + list(range(100)) + [nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [nan] * 5 + list(range(100)) + [nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype='O')
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='last')
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=True,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='last')
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items2, kind='mergesort', ascending=False,
na_position='first')
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge(object):
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
assert len(out) == len(left)
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7))
.astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notna(),
'right': out['right'].notna(),
'inner': out['left'].notna() & out['right'].notna(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == 'outer'
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_decons():
def testit(label_list, shape):
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
label_list2 = decons_group_index(group_index, shape)
for a, b in zip(label_list, label_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)]
testit(label_list, shape)
shape = (10000, 10000)
label_list = [np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5)]
testit(label_list, shape)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype='object')
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer_from_list(self):
values = ['b', 1, 0, 'a', 0, 'b']
result = safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
msg = (r"'(<|>)' not supported between instances of ('"
r"datetime\.datetime' and 'int'|'int' and 'datetime\.datetime"
r"')|"
r"unorderable types: int\(\) > datetime\.datetime\(\)")
if compat.PY2:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with warnings.catch_warnings():
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
else:
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
def test_exceptions(self):
with pytest.raises(TypeError,
match="Only list-like objects are allowed"):
safe_sort(values=1)
with pytest.raises(TypeError,
match="Only list-like objects or None"):
safe_sort(values=[0, 1, 2], labels=1)
with pytest.raises(ValueError,
match="values should be unique"):
safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
| bsd-3-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/io/json/_table_schema.py | 3 | 10308 | """
Table Schema builders
https://specs.frictionlessdata.io/json-table-schema/
"""
from typing import TYPE_CHECKING, Any, Dict, Optional, cast
import warnings
import pandas._libs.json as json
from pandas._typing import DtypeObj, FrameOrSeries, JSONSerializable
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import DataFrame
import pandas.core.common as com
if TYPE_CHECKING:
from pandas.core.indexes.multi import MultiIndex
loads = json.loads
def as_json_table_type(x: DtypeObj) -> str:
"""
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : np.dtype or ExtensionDtype
Returns
-------
str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
    =============== =================
    Pandas type     Table Schema type
    =============== =================
    int64           integer
    float64         number
    bool            boolean
    datetime64[ns]  datetime
    timedelta64[ns] duration
    object          str
    categorical     any
    =============== =================
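    Examples
    --------
    A few illustrative conversions (assuming ``numpy`` is imported as ``np``):
    >>> import numpy as np
    >>> as_json_table_type(np.dtype('int64'))
    'integer'
    >>> as_json_table_type(np.dtype('float64'))
    'number'
    >>> as_json_table_type(np.dtype('datetime64[ns]'))
    'datetime'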
"""
if is_integer_dtype(x):
return "integer"
elif is_bool_dtype(x):
return "boolean"
elif is_numeric_dtype(x):
return "number"
elif is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or is_period_dtype(x):
return "datetime"
elif is_timedelta64_dtype(x):
return "duration"
elif is_categorical_dtype(x):
return "any"
elif is_string_dtype(x):
return "string"
else:
return "any"
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
if com.all_not_none(*data.index.names):
nms = data.index.names
if len(nms) == 1 and data.index.name == "index":
warnings.warn("Index name of 'index' is not round-trippable")
elif len(nms) > 1 and any(x.startswith("level_") for x in nms):
warnings.warn("Index names beginning with 'level_' are not round-trippable")
return data
data = data.copy()
if data.index.nlevels > 1:
names = [
name if name is not None else f"level_{i}"
for i, name in enumerate(data.index.names)
]
data.index.names = names
else:
data.index.name = data.index.name or "index"
return data
def convert_pandas_type_to_json_field(arr):
dtype = arr.dtype
if arr.name is None:
name = "values"
else:
name = arr.name
field: Dict[str, JSONSerializable] = {
"name": name,
"type": as_json_table_type(dtype),
}
if is_categorical_dtype(dtype):
cats = dtype.categories
ordered = dtype.ordered
field["constraints"] = {"enum": list(cats)}
field["ordered"] = ordered
elif is_period_dtype(dtype):
field["freq"] = dtype.freq.freqstr
elif is_datetime64tz_dtype(dtype):
field["tz"] = dtype.tz.zone
return field
def convert_json_field_to_pandas_type(field):
"""
Converts a JSON field descriptor into its corresponding NumPy / pandas type
Parameters
----------
field
A JSON field descriptor
Returns
-------
dtype
Raises
------
ValueError
If the type of the provided field is unknown or currently unsupported
Examples
--------
>>> convert_json_field_to_pandas_type({'name': 'an_int',
'type': 'integer'})
'int64'
>>> convert_json_field_to_pandas_type({'name': 'a_categorical',
'type': 'any',
'constraints': {'enum': [
'a', 'b', 'c']},
'ordered': True})
'CategoricalDtype(categories=['a', 'b', 'c'], ordered=True)'
>>> convert_json_field_to_pandas_type({'name': 'a_datetime',
'type': 'datetime'})
'datetime64[ns]'
>>> convert_json_field_to_pandas_type({'name': 'a_datetime_with_tz',
'type': 'datetime',
'tz': 'US/Central'})
'datetime64[ns, US/Central]'
"""
typ = field["type"]
if typ == "string":
return "object"
elif typ == "integer":
return "int64"
elif typ == "number":
return "float64"
elif typ == "boolean":
return "bool"
elif typ == "duration":
return "timedelta64"
elif typ == "datetime":
if field.get("tz"):
return f"datetime64[ns, {field['tz']}]"
else:
return "datetime64[ns]"
elif typ == "any":
if "constraints" in field and "ordered" in field:
return CategoricalDtype(
categories=field["constraints"]["enum"], ordered=field["ordered"]
)
else:
return "object"
raise ValueError(f"Unsupported or invalid field type: {typ}")
def build_table_schema(
data: FrameOrSeries,
index: bool = True,
primary_key: Optional[bool] = None,
version: bool = True,
) -> Dict[str, JSONSerializable]:
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
    primary_key : bool or None, default None
Column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `Table Schema
<https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for
conversion types.
    Timedeltas are converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
"""
if index is True:
data = set_default_names(data)
schema: Dict[str, Any] = {}
fields = []
if index:
if data.index.nlevels > 1:
data.index = cast("MultiIndex", data.index)
for level, name in zip(data.index.levels, data.index.names):
new_field = convert_pandas_type_to_json_field(level)
new_field["name"] = name
fields.append(new_field)
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.items():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema["fields"] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema["primaryKey"] = [data.index.name]
else:
schema["primaryKey"] = data.index.names
elif primary_key is not None:
schema["primaryKey"] = primary_key
if version:
schema["pandas_version"] = "0.20.0"
return schema
def parse_table_schema(json, precise_float):
"""
Builds a DataFrame from a given schema
Parameters
----------
json :
A JSON table schema
precise_float : boolean
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See Also
--------
build_table_schema : Inverse function.
pandas.read_json
"""
table = loads(json, precise_float=precise_float)
col_order = [field["name"] for field in table["schema"]["fields"]]
df = DataFrame(table["data"], columns=col_order)[col_order]
dtypes = {
field["name"]: convert_json_field_to_pandas_type(field)
for field in table["schema"]["fields"]
}
# No ISO constructor for Timedelta as of yet, so need to raise
if "timedelta64" in dtypes.values():
raise NotImplementedError(
'table="orient" can not yet read ISO-formatted Timedelta data'
)
df = df.astype(dtypes)
if "primaryKey" in table["schema"]:
df = df.set_index(table["schema"]["primaryKey"])
if len(df.index.names) == 1:
if df.index.name == "index":
df.index.name = None
else:
df.index.names = [
None if x.startswith("level_") else x for x in df.index.names
]
return df
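# Illustrative sketch (not part of the original pandas module): a minimal
# round trip through ``DataFrame.to_json(orient="table")`` and
# ``parse_table_schema``.  The frame contents are arbitrary example values.
def _example_table_schema_round_trip():
    df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "c"]})
    json_str = df.to_json(orient="table")
    # The parsed frame matches ``df`` up to the index-name convention noted
    # in the docstring of ``parse_table_schema``.
    return parse_table_schema(json_str, precise_float=False)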
| gpl-2.0 |
Achuth17/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 16 | 12745 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
| bsd-3-clause |
chenhh/PySPPortfolio | PySPPortfolio/pysp_portfolio/utils.py | 1 | 2140 | # -*- coding: utf-8 -*-
"""
Authors: Hung-Hsin Chen <[email protected]>
License: GPL v2
"""
import numpy as np
import pandas as pd
def sharpe(series):
"""
Sharpe ratio
note: the numpy std() function is the population estimator
Parameters:
---------------
series: list or numpy.array, ROI series
"""
s = np.asarray(series)
try:
val = s.mean() / s.std()
except FloatingPointError:
# set 0 when standard deviation is zero
val = 0
return val
def sortino_full(series, mar=0):
"""
Sortino ratio, using all periods of the series
Parameters:
---------------
series: list or numpy.array, ROI series
mar: float, minimum acceptable return, usually set to 0
"""
s = np.asarray(series)
mean = s.mean()
semi_std = np.sqrt(((s * ((s - mar) < 0)) ** 2).mean())
try:
val = mean / semi_std
except FloatingPointError:
# set 0 when semi-standard deviation is zero
val = 0
return val, semi_std
def sortino_partial(series, mar=0):
"""
Sortino ratio, using only negative roi periods of the series
Parameters:
---------------
series: list or numpy.array, ROI series
mar: float, minimum acceptable return, usually set to 0
"""
s = np.asarray(series)
mean = s.mean()
n_neg_period = ((s - mar) < 0).sum()
try:
semi_std = np.sqrt(((s * ((s - mar) < 0)) ** 2).sum() / n_neg_period)
val = mean / semi_std
except FloatingPointError:
# set 0 when semi-standard deviation or negative period is zero
val, semi_std = 0, 0
return val, semi_std
def maximum_drawdown(series):
"""
https://en.wikipedia.org/wiki/Drawdown_(economics)
the peak may be zero
e.g.
s= [0, -0.4, -0.2, 0.2]
peak = [0, 0, 0, 0.2]
therefore we don't provide relative percentage of mdd
Parameters:
---------------
series: list or numpy.array, ROI series
"""
s = np.asarray(series)
    peak = np.maximum.accumulate(s)  # running maximum (pd.expanding_max was removed from pandas)
# absolute drawdown
ad = np.maximum(peak - s, 0)
mad = np.max(ad)
return mad
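# Illustrative usage (a sketch, not part of the original module): a toy ROI
# series run through the statistics defined above; the numbers are arbitrary
# example values.
def _example_statistics():
    rois = np.asarray([0.01, -0.02, 0.015, -0.005, 0.02])
    return {
        "sharpe": sharpe(rois),
        "sortino_full": sortino_full(rois)[0],
        "sortino_partial": sortino_partial(rois)[0],
        "max_drawdown": maximum_drawdown(rois),
    }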
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/numpy/lib/twodim_base.py | 1 | 27642 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043 # may vary
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
yield x
yield y
# This terrible logic is adapted from the checks in histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N == 2:
yield from bins # bins=[x, y]
else:
yield bins
yield weights
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
<matplotlib.image.AxesImage object at 0x...>
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
<matplotlib.collections.QuadMesh object at 0x...>
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, ..., 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, ..., 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| apache-2.0 |
nick-monto/SpeechRecog_CNN | model_keras.py | 1 | 5924 | import os
import fnmatch
import pandas as pd
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint
img_height, img_width = 120, 200
num_epochs = 10
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result[0]
def load_image(path):
img = Image.open(path).convert('L') # read in as grayscale
img = img.resize((img_width, img_height))
img.load() # loads the image into memory
img_data = np.asarray(img, dtype="float")
return img_data
stim_train = pd.read_table('img_set.txt',
delim_whitespace=True,
names=['stimulus', 'language'])
stim = stim_train['stimulus']
labels = pd.get_dummies(stim_train['language'])
# generate a train and validate set
X_train, X_val, y_train, y_val = train_test_split(stim,
labels,
test_size=0.2)
labels_train = y_train.values
labels_val = y_val.values
training_data_dir = 'Input_spectrogram/Training' # directory for training data
# test_data_dir = 'Input_spectrogram/Test' # directory for test data
print("Preparing the input and labels...")
specs_train_input = []
for i in range(len(X_train)):
specs_train_input.append(load_image(find(X_train.iloc[i],
training_data_dir)))
specs_train_input = np.asarray(specs_train_input)
specs_train_input = specs_train_input.reshape((len(X_train),
img_height, img_width, 1))
print('There are a total of {} training stimuli!'.format(specs_train_input.shape[0]))
specs_val_input = []
for i in range(len(X_val)):
specs_val_input.append(load_image(find(X_val.iloc[i],
training_data_dir)))
specs_val_input = np.asarray(specs_val_input)
specs_val_input = specs_val_input.reshape((len(X_val),
img_height, img_width, 1))
print('There are a total of {} validation stimuli!'.format(specs_val_input.shape[0]))
print("Done!")
# set of augments that will be applied to the training data
datagen = ImageDataGenerator(rescale=1./255)
checkpoint = ModelCheckpoint('./weights.best.hdf5', monitor='val_acc',
verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# # set up checkpoints for weights
# filepath="weights-improvement-{epoch:02d}-{accuracy:.2f}.hdf5"
# checkpoint = ModelCheckpoint(filepath,
# monitor='accuracy',
# verbose=1,
# save_best_only=True,
# mode='max')
# callbacks_list = [checkpoint]
# Define the model: 4 convolutional layers, 4 max pools
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=(img_height, img_width, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())  # converts 3D feature maps to 1D feature vectors
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))  # randomly drops half of the activations during training
model.add(Dense(8))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# compute quantities required for featurewise normalization
datagen.fit(specs_train_input)
datagen.fit(specs_val_input)
print("Initializing the model...")
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(specs_train_input,
labels_train,
batch_size=8),
steps_per_epoch=len(X_train) / 8,
epochs=num_epochs,
verbose=1,
callbacks=callbacks_list,
validation_data=datagen.flow(specs_val_input,
labels_val,
batch_size=8),
validation_steps=len(X_val) / 8)
# # this generator will read pictures found in a sub folder
# # it will indefinitely generate batches of augmented image data
# train_generator = train_datagen.flow_from_directory(
# training_data_dir,
# target_size=(img_width, img_height),
# batch_size=32,
# class_mode='categorical') # need categorical labels
#
# validation_generator = test_datagen.flow_from_directory(
# test_data_dir,
# target_size=(img_width, img_height),
# batch_size=32,
# class_mode='categorical')
# model.fit_generator(
# train_generator,
# samples_per_epoch=num_train_samples,
# nb_epoch=num_epoch,
# validation_data=validation_generator,
# nb_val_samples=num_val_samples,
# verbose=1,
# callbacks=callbacks_list
# )
# model.save_weights("model_trainingWeights_final.h5")
# print("Saved model weights to disk")
#
# model.predict_generator(
# test_generator,
# val_samples=nb_test_samples)
| mit |
pnedunuri/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
amninder/crypto | fabfile/crypto/plots.py | 1 | 2689 | # -*- coding: utf-8 -*-
import RSA
import miller_rabin
from gmpy2 import *
import gmpy2
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import time
from random import randint
param = 1
previous = 0
def plotGraphMiller():
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
plt.suptitle("Bytes VS Time")
plt.xlabel('Bytes')
plt.ylabel('NanoSeconds')
xar = []
yar = []
def animate(i):
global param
global previous
n = plotMillerTime(param)
if n[1]>previous:
previous = n[1]
xar.append(n[1]/8)
yar.append(n[0])
print ("Time: %d"%n[0])
print ("Bytes: %d"%int(n[1]/8))
ax1.clear()
plt.xlabel('Bytes')
plt.ylabel('NanoSeconds')
ax1.plot(xar, yar)
param += 1
ani = animation.FuncAnimation(fig, animate)
plt.show()
def plotGraphMillerDigits():
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
plt.suptitle("Digits VS Time")
plt.xlabel('Digits')
plt.ylabel('NanoSeconds')
xar = []
yar = []
def animate(i):
global param
global previous
n = plotMillerTime(param)
if n[1]>previous:
previous = n[1]
xar.append(n[2])
yar.append(n[0])
print ("Time: %d"%n[0])
print ("Digits: %d"%int(n[2]))
ax1.clear()
plt.xlabel('Digits')
plt.ylabel('NanoSeconds')
ax1.plot(xar, yar)
param += 1
ani = animation.FuncAnimation(fig, animate)
plt.show()
def plotPrimeRange():
r = primeRange(1000000)
plt.plot(r[0], r[1])
# plt.xticks(np.arange(min(r[0], max(r[0]), 1.0)))
plt.ylabel('percent of primes')
plt.xlabel('Number Range')
pp = PdfPages('multipage.pdf')
plt.savefig(pp, format='pdf')
pp.close()
# plt.show()
# ______PLOT FUNCTIONS_______
def plotMillerTime(p):
multiVar = 1000000000
mills = int(round(time.time()*multiVar))
n = (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p))+(gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p)-1)
while not miller_rabin.millerRabin(n, 2):
n = (gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p))+(gmpy2.xmpz(RSA.getRandom())**gmpy2.xmpz(p)-1)
mills = int(round(time.time()*multiVar)) - mills
    print(mills)
return (mills, bit_length(n), totalDigits(n))
def totalDigits(num):
i = 0
while num>0:
i += 1
        num //= 10
return i
total_primes = []
nextP = 1
def nextPrime(p):
power = 100
global total_primes
global nextP
while nextP <= p:
nextP = next_prime(nextP)
total_primes.append(nextP)
return len(total_primes)-1
def primeRange(j):
x = 1
ran = []
tot_primes = []
while x <=j:
ran.append(x)
tot_primes.append(float(nextPrime(x))/float(x))
print ("%.10f in %d"%(float(nextPrime(x))/float(x), x))
x += 1
return (ran, tot_primes)
# primeRange(10000)
| mit |
lsst-dm/great3-public | metrics/evaluate.py | 2 | 61024 | # Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@file evaluate.py
Module containing functions for the evaluation of the constant and variable shear-type branches
of the GREAT3 Challenge.
For the functions used to directly evaluate the GREAT3 metrics, see q_constant() and q_variable().
This code requires the GREAT3 metric evaluation truth data to have been unpacked into the directory
specified by the path in TRUTH_DIR, set below. Please update TRUTH_DIR to match the location of the
truth data on your system.
For information about getting this data, please see the following page on the great3-public wiki:
https://github.com/barnabytprowe/great3-public/wiki/Metric-evaluation-scripts-and-truth-data
Constant shear branches
-----------------------
Each submission file (one per branch) should take the format of a 3-column ASCII catalog, e.g.:
# SUBFIELD_INDEX G1 G2
0 -.26664 0.11230
1 -.13004 0.48103
...
or similar. The hashed header/comment can be omitted, and almost all formats for the numbers are
fine. The main criterion to be satisfied is that after
>>> data = np.loadtxt(submission_file)
the `data` object must be a NumPy array with shape `(NSUBFIELDS, 3)`, where `NSUBFIELDS` is the
total number of subfields in the branch (currently fixed at 200).
In addition, the array slice `data[:, 0]` must be the subfield number in ascending order from `0` to
`NSUBFIELDS - 1`. The array slices `data[:, 1]` and `data[:, 2]` must be the corresponding
estimates of mean shear g1 and g2 in each subfield.
In these details the submission should match the output of the helper script `presubmission.py`
available at https://github.com/barnabytprowe/great3-public .
Variable shear branches
-----------------------
Each submission file (one per branch) should take the format of an ASCII catalog with a minimum of
3 columns as in the following example:
# FIELD_INDEX THETA [degrees] MAP_E
0 0.0246 2.5650e-06
0 0.0372 4.1300e-06
...
The `FIELD_INDEX` will be an integer between 0 and 9. `THETA` should be a sequence of floats giving
the annular bin centres in degrees; these are logarithmically spaced between the minimum separation
considered (0.02 degrees) and the maximum (10.0 degrees). `MAP_E` is the E-mode aperture mass
dispersion. `FIELD`, `THETA` (and the thus corresponding `MAP_E` entries) must be ordered as in the
output of `presubmission.py`.
The hashed header/comment can be omitted. Additional columns can be present provided that the
location and order of the three described above are preserved. An example of this is the output
of `presubmission.py` for variable shear branches, which also appends columns for the B-mode
aperture mass dispersion and a (shot noise only) error estimate.
After
>>> data = np.loadtxt(submission_file)
the `data` object must be a NumPy array with shape `(NFIELDS * NBINS_THETA, n)`, where `NFIELDS` is
the total number of fields in the branch (currently fixed at 10), `NBINS_THETA` is the number of
annular bins in angle used to estimate Map_E in each field (currently fixed at 15), and `n >= 3`.
As mentioned, in these details the submission should match the output of the helper script
`presubmission.py` available at https://github.com/barnabytprowe/great3-public .
"""
import os
import sys
import logging
import numpy as np
try:
import great3sims
import great3sims.mapper
except ImportError:
path, module = os.path.split(__file__)
sys.path.append(os.path.join(path, "..")) # Appends the folder great3-public/ to sys.path
import great3sims
import great3sims.mapper
try:
import g3metrics
except ImportError:
path, module = os.path.split(__file__)
sys.path.append(os.path.join(path, "..", "metrics")) # Appends the great3-private/metrics
# folder to path
import g3metrics
TRUTH_DIR = "/great3/beta/truth" # Root folder in which the truth values are unpacked
NFIELDS = 10 # Total number of fields
NSUBFIELDS = 200 # Total number of subfields, not necessarily equal to the number of subfields made
# in mass_produce as that script also generates the deep fields
NSUBFIELDS_PER_FIELD = NSUBFIELDS / NFIELDS
NGALS_PER_SUBFIELD = 10000 # 100x100 galaxies per subfield
CFID = 2.e-4
MFID = 2.e-3
XMAX_GRID_DEG = 10.0 # Maximum image spatial extent in degrees
DX_GRID_DEG = 0.1 # Grid spacing in degrees
THETA_MIN_DEG = 0.02 # Minimum and maximum angular scales for logarithmic bins used to calculate the
THETA_MAX_DEG = 10.0 # aperture mass disp. - MUST match specs given to participants - in degrees
NBINS_THETA = 15 # Number of logarithmic bins theta for the aperture mass dispersion
EXPECTED_THETA = np.array([ # Array of theta values expected in submissions, good to 3 d.p.
0.0246, 0.0372, 0.0563, 0.0853, 0.1290, 0.1953, 0.2955, 0.4472, 0.6768, 1.0242, 1.5499,
2.3455, 3.5495, 5.3716, 8.1289] * NFIELDS)
USEBINS = np.array([ # Which of the theta bins above to actually use in calculating the metric?
False, False, False, True, True, True, True, True, True, True, True,
True, True, False, False] * NFIELDS) # Note the *NFIELDS to match per-field theta layout
STORAGE_DIR = "./metric_calculation_products" # Folder into which to store useful intermediate
# outputs of metric calculations (e.g. rotation files,
# dicts, mapE tables) which need be calculated only
# once
SUBFIELD_DICT_FILE_PREFIX = "subfield_dict_"
GTRUTH_FILE_PREFIX = "gtruth_"
ROTATIONS_FILE_PREFIX = "rotations_"
OFFSETS_FILE_PREFIX = "offsets_"
MAPESHEAR_FILE_PREFIX = "mapEshear_"
MAPEINT_FILE_PREFIX = "mapEint_"
MAPEOBS_FILE_PREFIX = "mapEobs_"
# These constant normalization factors come from a run of ~1000 sims done on 6 Jan 2014, modified on
# 30 Jan 2014 to bring space and ground into agreement at high bias
NORMALIZATION_CONSTANT_SPACE = 1.232
NORMALIZATION_CONSTANT_GROUND = NORMALIZATION_CONSTANT_SPACE
NORMALIZATION_VARIABLE_SPACE = 0.0001837 # Factor comes from tests with
# tabulate_variable_shear_metric_rev1.py on 1000 runs and
# NOISE_SIGMA = 0.10, 6 Jan 2015, with sigma2_min = 4.e-8
NORMALIZATION_VARIABLE_GROUND = NORMALIZATION_VARIABLE_SPACE # Bring space=ground at high bias
# Values of sigma2_min to adopt as the defaults for the Q_c and Q_v metrics, as of 30 Dec 2013.
# These parameters add a damping term to the denominator of each metric, capping the maximum
# achievable score.
SIGMA2_MIN_CONSTANT_GROUND = 4. # 2**2
SIGMA2_MIN_CONSTANT_SPACE = 1. # 1**2
SIGMA2_MIN_VARIABLE_GROUND = 9.e-8 # [3 * 1.e-4]**2
SIGMA2_MIN_VARIABLE_SPACE = 4.e-8 # [2 * 1.e-4]**2
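# Illustrative sketch (not part of the original module): minimal shape checks matching the
# submission formats described in the module docstring above. The file paths passed in are
# hypothetical; the constants used are the module-level values defined above.
def _example_check_submission_shapes(constant_file, variable_file):
    """Load a constant and a variable shear submission and verify their documented layouts."""
    const_data = np.loadtxt(constant_file)
    assert const_data.shape == (NSUBFIELDS, 3) # one row per subfield: index, mean g1, mean g2
    assert np.array_equal(const_data[:, 0].astype(int), np.arange(NSUBFIELDS))
    var_data = np.loadtxt(variable_file)
    assert var_data.shape[0] == NFIELDS * NBINS_THETA # one row per (field, theta bin) pair
    assert var_data.shape[1] >= 3 # FIELD_INDEX, THETA, MAP_E, plus any extra columns
    return const_data, var_data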
def get_generate_const_truth(experiment, obs_type, truth_dir=TRUTH_DIR, storage_dir=STORAGE_DIR,
logger=None):
"""Get or generate arrays of subfield_index, g1true, g2true, each of length `NSUBFIELDS`.
If the gtruth file has already been built for this constant shear branch, loads and returns the
saved copies.
If the array of truth values has not been built, or is older than the first entry in the set of
shear_params files, the arrays are built first, saved to file, then returned.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth information for the challenge is stored
@param logger Python logging.Logger instance, for message logging
@return subfield_index, g1true, g2true
"""
gtruefile = os.path.join(storage_dir, GTRUTH_FILE_PREFIX+experiment[0]+obs_type[0]+"c.asc")
mapper = great3sims.mapper.Mapper(truth_dir, experiment, obs_type, "constant")
use_stored = True
if not os.path.isfile(gtruefile):
use_stored = False
if logger is not None:
logger.info(
"First build of shear truth tables using values from "+
os.path.join(mapper.full_dir, "shear_params-*.yaml"))
else:
# Compare timestamps for the gtruefile and the first shear_params file
# (subfield = 000) for this branch. If the former is older than the latter, or this file,
# force rebuild...
gtruemtime = os.path.getmtime(gtruefile)
shear_params_file = os.path.join(mapper.full_dir, "shear_params-000.yaml")
shear_params_mtime = os.path.getmtime(shear_params_file)
if gtruemtime < shear_params_mtime or gtruemtime < os.path.getmtime(__file__):
use_stored = False
if logger is not None:
logger.info(
"Updating out-of-date shear truth tables using newer values from "+
os.path.join(mapper.full_dir, "shear_params-*.yaml"))
# Then load or build (and save) the array of truth values per subfield
if use_stored:
if logger is not None:
logger.info("Loading shear truth tables from "+gtruefile)
gtruedata = np.loadtxt(gtruefile)
else:
params_prefix = os.path.join(mapper.full_dir, "shear_params-")
import yaml
# Check to see if this is a variable_psf or full branch, in which case we only need the
# first entry from each set of subfields
if experiment in ("variable_psf", "full"):
gtruedata = np.empty((NFIELDS, 3))
gtruedata[:, 0] = np.arange(NFIELDS)
subfield_index_targets = range(0, NSUBFIELDS, NSUBFIELDS_PER_FIELD)
else:
gtruedata = np.empty((NSUBFIELDS, 3))
gtruedata[:, 0] = np.arange(NSUBFIELDS)
subfield_index_targets = range(NSUBFIELDS)
# Then loop over the required subfields reading in the shears
for i, subfield_index in enumerate(subfield_index_targets):
params_file = params_prefix+("%03d" % subfield_index)+".yaml"
with open(params_file, "rb") as funit:
gdict = yaml.load(funit)
gtruedata[i, 1] = gdict["g1"]
gtruedata[i, 2] = gdict["g2"]
if logger is not None:
logger.info("Saving shear truth table to "+gtruefile)
if not os.path.isdir(storage_dir):
os.mkdir(storage_dir)
with open(gtruefile, "wb") as fout:
fout.write("# True shears for "+experiment+"-"+obs_type+"-constant\n")
fout.write("# subfield_index g1true g2true\n")
np.savetxt(fout, gtruedata, fmt=" %4d %+.18e %+.18e")
return (gtruedata[:, 0]).astype(int), gtruedata[:, 1], gtruedata[:, 2]
def get_generate_const_rotations(experiment, obs_type, storage_dir=STORAGE_DIR, truth_dir=TRUTH_DIR,
logger=None):
"""Get or generate an array of rotation angles for Q_const calculation.
If the rotation file has already been built for this constant shear branch, loads and returns an
array of rotation angles to align with the PSF. This array is of shape `(NSUBFIELDS,)`, having
averaged over the `n_epochs` epochs in the case of multi-epoch branches.
If the rotation file has not been built, or is older than the first entry in the set of
starshape_parameters files, the array of rotations is built, saved to file, then returned.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth information for the challenge is stored
@param logger Python logging.Logger instance, for message logging
@return rotations An array containing all the rotation angles, in radians
"""
import great3sims
rotfile = os.path.join(storage_dir, ROTATIONS_FILE_PREFIX+experiment[0]+obs_type[0]+"c.asc")
mapper = great3sims.mapper.Mapper(truth_dir, experiment, obs_type, "constant")
use_stored = True
if not os.path.isfile(rotfile):
use_stored = False
if logger is not None:
logger.info(
"First build of rotations file using starshape_parameters from "+
mapper.full_dir)
else:
# Then compare timestamps for the rotation file and the first starshape_parameters file
# (subfield = 000, epoch =0) for this branch. If the former is older than the latter, or
# this file, force rebuild...
rotmtime = os.path.getmtime(rotfile)
        starshape_file_template, _, _ = mapper.mappings['starshape_parameters']
starshape_file = os.path.join(
mapper.full_dir, starshape_file_template % {"epoch_index": 0, "subfield_index": 0})
starshapemtime = os.path.getmtime(starshape_file+".yaml")
if rotmtime < starshapemtime or rotmtime < os.path.getmtime(__file__):
use_stored = False
if logger is not None:
logger.info(
"Updating out-of-date rotations file using newer starshape_parameters from "+
mapper.full_dir)
# Then load / build as required
if use_stored is True:
if logger is not None:
logger.info("Loading rotations from "+rotfile)
rotations = np.loadtxt(rotfile)[:, 1] # First column is just subfield indices
else:
# To build we must loop over all the subfields and epochs
# First work out if the experiment is multi-exposure and has multiple epochs
if experiment in ("multiepoch", "full"):
import great3sims.constants
n_epochs = great3sims.constants.n_epochs
else:
n_epochs = 1
# Setup the array for storing the PSF values from which rotations are calculated
psf_g1 = np.empty((NSUBFIELDS, n_epochs))
psf_g2 = np.empty((NSUBFIELDS, n_epochs))
mean_psf_g1 = np.empty(NSUBFIELDS)
mean_psf_g2 = np.empty(NSUBFIELDS)
for subfield_index in range(NSUBFIELDS):
n_ignore = 0 # Counter for how many epochs had flagged, bad PSF g1/g2 values
for epoch_index in range(n_epochs):
starshape_parameters = mapper.read(
"starshape_parameters",
data_id={"epoch_index": epoch_index, "subfield_index": subfield_index})
star_g1 = starshape_parameters["psf_g1"]
star_g2 = starshape_parameters["psf_g2"]
# Test for flagged failures (these do happen rarely and are given the value
# psf_g1=psf_g2=-10.0, see writeStarParameters in great3sims/builders.py)
# If the psf ellipticities are failed, we just ignore these for the (m, c) calcs
if star_g1 > -9.9 and star_g2 > -9.9:
psf_g1[subfield_index, epoch_index] = star_g1
psf_g2[subfield_index, epoch_index] = star_g2
else:
n_ignore += 1
psf_g1[subfield_index, epoch_index] = 0.
psf_g2[subfield_index, epoch_index] = 0.
# Calculate the mean across the epochs in this subfield taking any flagged values into
# account
n_eff = n_epochs - n_ignore
if n_eff > 0:
mean_psf_g1[subfield_index] = (psf_g1[subfield_index, :]).sum() / float(n_eff)
mean_psf_g2[subfield_index] = (psf_g2[subfield_index, :]).sum() / float(n_eff)
else:
mean_psf_g1[subfield_index] = 0. # This is safe in np.arctan2() -> 0.
mean_psf_g2[subfield_index] = 0.
if experiment in ("variable_psf", "full"):
# Average over all subfields per field
final_psf_g1 = np.empty(NFIELDS)
final_psf_g2 = np.empty(NFIELDS)
for i in range(NFIELDS):
final_psf_g1[i] = np.mean(
mean_psf_g1[i * NSUBFIELDS_PER_FIELD: (i + 1) * NSUBFIELDS_PER_FIELD])
final_psf_g2[i] = np.mean(
mean_psf_g2[i * NSUBFIELDS_PER_FIELD: (i + 1) * NSUBFIELDS_PER_FIELD])
else:
final_psf_g1 = mean_psf_g1
final_psf_g2 = mean_psf_g2
rotations = .5 * np.arctan2(final_psf_g2, final_psf_g1)
# We have built rotations, but then save this file as ascii for use next time
if logger is not None:
logger.info("Saving rotations to "+rotfile)
if not os.path.isdir(storage_dir):
os.mkdir(storage_dir)
with open(rotfile, "wb") as fout:
fout.write("# Rotations for "+experiment+"-"+obs_type+"-constant\n")
fout.write("# subfield_index rotation [radians]\n")
np.savetxt(fout, np.array((np.arange(len(rotations)), rotations)).T, fmt=" %4d %+.18f")
return rotations
def get_generate_variable_offsets(experiment, obs_type, storage_dir=STORAGE_DIR,
truth_dir=TRUTH_DIR, logger=None):
"""Get or generate arrays of subfield_index, offset_deg_x, offset_deg_y, each of length
`NSUBFIELDS`.
If the offsets file has already been built for this variable shear branch, loads and returns the
saved arrays.
If the arrays of offset values have not been built, or are older than the first entry in the set
of subfield_offset files, the arrays are built first, saved to file, then returned.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth information for the challenge is stored
@param logger Python logging.Logger instance, for message logging
@return subfield_index, offset_deg_x, offset_deg_y
"""
offsetfile = os.path.join(storage_dir, OFFSETS_FILE_PREFIX+experiment[0]+obs_type[0]+"v.asc")
mapper = great3sims.mapper.Mapper(truth_dir, experiment, obs_type, "variable")
use_stored = True
if not os.path.isfile(offsetfile):
use_stored = False
if logger is not None:
logger.info(
"First build of offsets file using subfield_offset files from "+
mapper.full_dir)
else:
# Then compare timestamps for the offsets file and the first file
# (subfield = 000) for this branch. If the former is older than the latter, or
# this file, force rebuild...
offsetmtime = os.path.getmtime(offsetfile)
subfield_offset_file = os.path.join(mapper.full_dir, "subfield_offset-000.yaml")
subfield_offset_mtime = os.path.getmtime(subfield_offset_file)
if offsetmtime < subfield_offset_mtime or offsetmtime < os.path.getmtime(__file__):
use_stored = False
if logger is not None:
logger.info(
"Updating out-of-date offset file using newer values from "+
os.path.join(mapper.full_dir, "subfield_offset-*.yaml"))
# Then load / build as required
if use_stored is True:
if logger is not None:
logger.info("Loading offsets from "+offsetfile)
offsets = np.loadtxt(offsetfile)
else:
offsets_prefix = os.path.join(mapper.full_dir, "subfield_offset-")
offsets = np.empty((NSUBFIELDS, 3))
import yaml
offsets[:, 0] = np.arange(NSUBFIELDS)
for i in range(NSUBFIELDS):
offsets_file = offsets_prefix+("%03d" % i)+".yaml"
with open(offsets_file, "rb") as funit:
offsetdict = yaml.load(funit)
offsets[i, 1] = offsetdict["offset_deg_x"]
offsets[i, 2] = offsetdict["offset_deg_y"]
if logger is not None:
logger.info("Saving offset file to "+offsetfile)
if not os.path.isdir(storage_dir):
os.mkdir(storage_dir)
with open(offsetfile, "wb") as fout:
fout.write("# Subfield offsets for "+experiment+"-"+obs_type+"-variable\n")
fout.write("# subfield_index offset_deg_x offset_deg_y\n")
np.savetxt(fout, offsets, fmt=" %4d %.18e %.18e")
return (offsets[:, 0]).astype(int), offsets[:, 1], offsets[:, 2]
def run_corr2(x, y, e1, e2, w, min_sep=THETA_MIN_DEG, max_sep=THETA_MAX_DEG, nbins=NBINS_THETA,
cat_file_suffix='_temp.fits', params_file_suffix='_corr2.params',
m2_file_suffix='_temp.m2', xy_units='degrees', sep_units='degrees',
corr2_executable='corr2'):
"""Copied from presubmission.py
"""
import pyfits
import subprocess
import tempfile
# Create temporary, unique files for I/O
catfile = tempfile.mktemp(suffix=cat_file_suffix)
paramsfile = tempfile.mktemp(suffix=params_file_suffix)
m2file = tempfile.mktemp(suffix=m2_file_suffix)
# Write the basic corr2.params to temp location
print_basic_corr2_params(paramsfile, min_sep=min_sep, max_sep=max_sep, nbins=nbins,
xy_units=xy_units, sep_units=sep_units, fits_columns=True)
# Use fits binary table for faster I/O. (Converting to/from strings is slow.)
# First, make the data into np arrays
x_array = np.asarray(x).flatten()
y_array = np.asarray(y).flatten()
g1_array = np.asarray(e1).flatten()
g2_array = np.asarray(e2).flatten()
w_array = np.asarray(w).flatten()
# Then, mask out the >= 10 values
use_mask = np.logical_and.reduce([g1_array<10.,g2_array<10.])
# And finally make the FITS file
x_col = pyfits.Column(name='x', format='1D', array=x_array[use_mask])
y_col = pyfits.Column(name='y', format='1D', array=y_array[use_mask])
g1_col = pyfits.Column(name='g1', format='1D', array=g1_array[use_mask])
g2_col = pyfits.Column(name='g2', format='1D', array=g2_array[use_mask])
w_col = pyfits.Column(name='w', format='1D', array=w_array[use_mask])
cols = pyfits.ColDefs([x_col, y_col, g1_col, g2_col, w_col])
table = pyfits.new_table(cols)
phdu = pyfits.PrimaryHDU()
hdus = pyfits.HDUList([phdu, table])
hdus.writeto(catfile, clobber=True)
subprocess.Popen([
corr2_executable, str(paramsfile), 'file_name='+str(catfile), 'm2_file_name='+str(m2file)
]).wait()
results = np.loadtxt(m2file)
os.remove(paramsfile)
os.remove(catfile)
os.remove(m2file)
return results
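# Illustrative sketch (not part of the original module) of calling run_corr2 on a toy shear
# catalogue. It assumes Mike Jarvis' corr2 executable and pyfits are installed; the random
# values are made up purely to show the calling convention and the column layout of the
# output, which is indexed the same way as in get_generate_variable_truth() below.
def _example_run_corr2_toy(corr2_exec="corr2"):
    x = np.random.uniform(0., XMAX_GRID_DEG, size=NGALS_PER_SUBFIELD)
    y = np.random.uniform(0., XMAX_GRID_DEG, size=NGALS_PER_SUBFIELD)
    e1 = np.random.normal(scale=0.03, size=NGALS_PER_SUBFIELD)
    e2 = np.random.normal(scale=0.03, size=NGALS_PER_SUBFIELD)
    w = np.ones(NGALS_PER_SUBFIELD)
    results = run_corr2(x, y, e1, e2, w, corr2_executable=corr2_exec)
    theta = results[:, 0]
    map_E = results[:, 1]
    map_B = results[:, 2]
    maperr = results[:, 5]
    return theta, map_E, map_B, maperr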
def print_basic_corr2_params(outfile, min_sep=THETA_MIN_DEG, max_sep=THETA_MAX_DEG,
nbins=NBINS_THETA, xy_units='degrees', sep_units='degrees',
fits_columns=False):
"""Write a bare-bones corr2.params file (used by corr2) to the file named outfile.
"""
with open(outfile, 'wb') as fout:
if fits_columns:
fout.write("# Column description\n")
fout.write("x_col = x\n")
fout.write("y_col = y\n")
fout.write("g1_col = g1\n")
fout.write("g2_col = g2\n")
fout.write("w_col = w\n")
fout.write("\n")
fout.write("# File info\n")
fout.write("file_type=FITS")
else:
fout.write("# Column description\n")
fout.write("x_col = 1\n")
fout.write("y_col = 2\n")
fout.write("g1_col = 3\n")
fout.write("g2_col = 4\n")
fout.write("w_col = 5\n")
fout.write("\n")
fout.write(
"# Assume sign conventions for gamma were correct in the catalog passed to "+
"presubmission.py\n")
fout.write("flip_g1 = false\n")
fout.write("flip_g2 = false\n")
fout.write("\n")
fout.write("# Describe the parameters of the requested correlation function\n")
fout.write('min_sep=%f\n'%min_sep)
fout.write('max_sep=%f\n'%max_sep)
fout.write('nbins=%f\n'%nbins)
fout.write('x_units='+str(xy_units)+'\n')
fout.write('y_units='+str(xy_units)+'\n')
fout.write('sep_units='+str(sep_units)+'\n')
fout.write('\n')
fout.write("# verbose specifies how much progress output the code should emit.\n")
fout.write("verbose = 0\n")
fout.write("\n")
def get_generate_variable_truth(experiment, obs_type, storage_dir=STORAGE_DIR, truth_dir=TRUTH_DIR,
logger=None, corr2_exec="corr2", make_plots=False,
file_prefixes=("galaxy_catalog",), suffixes=("",),
mape_file_prefix=MAPESHEAR_FILE_PREFIX, output_xy_prefix=None):
"""Get or generate an array of truth map_E vectors for all the fields in this branch.
If the map_E truth file has already been built for this variable shear branch, loads and returns
the saved copies.
If the array of truth values has not been built, or is older than the first entry in the set of
galaxy_catalog files, the arrays are built first, saved to file, then returned.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth info for the challenge is stored
@param logger Python logging.Logger instance, for message logging
    @param corr2_exec       Path to Mike Jarvis' corr2 executable
@param make_plots Generate plotting output
@param file_prefixes Tuple containing one or more prefixes for file type in which to load
up shears, summing shears when `len(file_prefixes) >= 2`
[default = `("galaxy_catalog",)`]
@param suffixes Load up shear from entries "g1"+suffixes[0] and "g2"+suffixes[0] in the
`file_prefixes[0]`-type files, then add "g1"+suffixes[1] from
`file_prefixes[1]`-type files, etc. Must be same length as
`file_prefixes` tuple [default = `("",)`]
@param mape_file_prefix Prefix for output filename
@param output_xy_prefix Filename prefix (and switch if not None) for x-y position debug output
@return field, theta, map_E, map_B, maperr
"""
# Sanity check on suffixes & prefixes
if len(suffixes) != len(file_prefixes):
raise ValueError("Input file_prefixes and suffixes kwargs must be same length.")
# Build basic x and y grids to use for coord positions: note we do this here rather than as
# needed later so as to check the dimensions (meshgrid is very quick anyway)
xgrid_deg, ygrid_deg = np.meshgrid(
np.arange(0., XMAX_GRID_DEG, DX_GRID_DEG), np.arange(0., XMAX_GRID_DEG, DX_GRID_DEG))
xgrid_deg = xgrid_deg.flatten() # Flatten these - the default C ordering corresponds to the way
ygrid_deg = ygrid_deg.flatten() # the true shears are ordered too, which is handy
if len(xgrid_deg) != NGALS_PER_SUBFIELD:
raise ValueError(
"Dimensions of xgrid_deg and ygrid_deg do not match NGALS_PER_SUBFIELD. Please check "+
"the values of XMAX_GRID_DEG and DX_GRID_DEG in evaluate.py.")
# Define storage file and check for its existence and/or age
mapEtruefile = os.path.join(
storage_dir, mape_file_prefix+experiment[0]+obs_type[0]+"v.asc")
mapper = great3sims.mapper.Mapper(truth_dir, experiment, obs_type, "variable")
use_stored = True
if not os.path.isfile(mapEtruefile):
use_stored = False
if logger is not None:
logger.info(
"First build of map_E truth file using "+str(file_prefixes)+" files from "+
mapper.full_dir)
else:
# Then compare timestamps for the mapE file and the newest file_prefixes[:]-000.fits file
# (subfield = 000) for this branch. If the former is older than the latter, or
# this file, force rebuild...
mapEmtime = os.path.getmtime(mapEtruefile)
        catalogmtime = 0 # Set earliest possible timestamp
for prefix in file_prefixes:
catalog_file = os.path.join(mapper.full_dir, prefix+"-000.fits")
tmpmtime = os.path.getmtime(catalog_file)
if tmpmtime > catalogmtime: catalogmtime = tmpmtime
if mapEmtime < catalogmtime or mapEmtime < os.path.getmtime(__file__):
use_stored = False
if logger is not None:
logger.info(
"Updating out-of-date map_E file using newer "+str(file_prefixes)+" files "+
"from "+mapper.full_dir)
# Then load / build as required
if use_stored is True:
if logger is not None:
logger.info("Loading truth map_E from "+mapEtruefile)
data = np.loadtxt(mapEtruefile)
field, theta, map_E, map_B, maperr = (
data[:, 0].astype(int), data[:, 1], data[:, 2], data[:, 3], data[:, 4])
else:
# Define the field array, then theta and map arrays in which we'll store the results
field = np.arange(NBINS_THETA * NFIELDS) / NBINS_THETA
theta = np.empty(NBINS_THETA * NFIELDS)
map_E = np.empty(NBINS_THETA * NFIELDS)
map_B = np.empty(NBINS_THETA * NFIELDS)
maperr = np.empty(NBINS_THETA * NFIELDS)
# Load the offsets
subfield_indices, offset_deg_x, offset_deg_y = get_generate_variable_offsets(
experiment, obs_type, storage_dir=storage_dir, truth_dir=truth_dir, logger=logger)
# Setup some storage arrays into which we'll write
xfield = np.empty((NGALS_PER_SUBFIELD, NSUBFIELDS_PER_FIELD))
yfield = np.empty((NGALS_PER_SUBFIELD, NSUBFIELDS_PER_FIELD))
# Loop over fields
import pyfits
for ifield in range(NFIELDS):
# Read in all the shears in this field and store
g1 = np.zeros((NGALS_PER_SUBFIELD, NSUBFIELDS_PER_FIELD))
g2 = np.zeros((NGALS_PER_SUBFIELD, NSUBFIELDS_PER_FIELD))
for jsub in range(NSUBFIELDS_PER_FIELD):
# Build the x,y grid using the subfield offsets
isubfield_index = jsub + ifield * NSUBFIELDS_PER_FIELD
xfield[:, jsub] = xgrid_deg + offset_deg_x[isubfield_index]
yfield[:, jsub] = ygrid_deg + offset_deg_y[isubfield_index]
# If requested (by setting output_xy_prefix) then write these xy out for diagnostic
if output_xy_prefix is not None:
output_xy_filename = output_xy_prefix+("-sub%03d" % isubfield_index)+".asc"
print "Writing "+output_xy_filename+" as requested..."
with open(output_xy_filename, 'wb') as fout:
fout.write("# x y\n")
np.savetxt(fout, np.array((xfield[:, jsub], yfield[:, jsub])).T)
# Then loop over the supplied file_prefixes and g1/g2 suffixes, summing shears
for prefix, suffix in zip(file_prefixes, suffixes):
galcatfile = os.path.join(
mapper.full_dir, (prefix+"-%03d.fits" % isubfield_index))
truedata = pyfits.getdata(galcatfile)
if len(truedata) != NGALS_PER_SUBFIELD:
raise ValueError(
"Number of records in "+galcatfile+" (="+str(len(truedata))+") is not "+
"equal to NGALS_PER_SUBFIELD (="+str(NGALS_PER_SUBFIELD)+")")
# Use the correct rule for shear addition, best (most safely) evaluated using
# arrays of complex numbers, see Schneider 2006 eq 12
gtoaddc = truedata["g1"+suffix] + truedata["g2"+suffix]*1j
gpriorc = g1[:, jsub] + g2[:, jsub]*1j
gfinalc = (gpriorc + gtoaddc) / (1. + gtoaddc.conj() * gpriorc)
g1[:, jsub] = gfinalc.real
g2[:, jsub] = gfinalc.imag
# If requested (by setting output_xy_prefix) then write these xy out for diagnostic
if output_xy_prefix is not None:
output_xy_filename = output_xy_prefix+("-%03d" % ifield)+".asc"
with open(output_xy_filename, 'wb') as fout:
fout.write("# x y\n")
np.savetxt(fout, np.array((xfield.flatten(), yfield.flatten())).T)
# Having got the x,y and g1, g2 for all the subfields in this field, flatten and use
# to calculate the map_E
map_results = run_corr2(
xfield.flatten(), yfield.flatten(), g1.flatten(), g2.flatten(),
np.ones(NGALS_PER_SUBFIELD * NSUBFIELDS_PER_FIELD), min_sep=THETA_MIN_DEG,
max_sep=THETA_MAX_DEG, nbins=NBINS_THETA, corr2_executable=corr2_exec,
xy_units="degrees", sep_units="degrees")
theta[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA] = map_results[:, 0]
map_E[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA] = map_results[:, 1]
map_B[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA] = map_results[:, 2]
maperr[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA] = map_results[:, 5]
# Save these in ASCII format
if logger is not None:
logger.info("Saving truth map_E file to "+mapEtruefile)
with open(mapEtruefile, "wb") as fout:
fout.write("# True aperture mass statistics for "+experiment+"-"+obs_type+"-variable\n")
fout.write("# field_index theta [deg] map_E map_B maperr\n")
np.savetxt(
fout, np.array((field, theta, map_E, map_B, maperr)).T,
fmt=" %2d %.18e %.18e %.18e %.18e")
if make_plots and not use_stored: # No point plotting if already built!
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 8))
plt.subplot(211)
for ifield in range(NFIELDS):
plt.semilogx(
theta[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA],
map_E[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA], label="Field "+str(ifield))
plt.ylim(-2.e-5, 2.e-5)
plt.title(mapEtruefile+" E-mode")
plt.ylabel("Ap. Mass Dispersion")
plt.axhline(ls="--", color="k")
plt.legend()
plt.subplot(212)
for ifield in range(NFIELDS):
plt.semilogx(
theta[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA],
map_B[ifield * NBINS_THETA: (ifield + 1) * NBINS_THETA], label="Field "+str(ifield))
plt.ylim(-2.e-5, 2.e-5)
plt.title(mapEtruefile+" B-mode")
plt.xlabel("Theta [degrees]")
plt.ylabel("Ap. Mass Dispersion")
plt.axhline(ls="--", color="k")
plt.legend()
plotfile = mapEtruefile.rstrip("asc")+"png"
if logger is not None:
logger.info("Saving plot output to "+plotfile)
plt.savefig(plotfile)
# Then return
return field, theta, map_E, map_B, maperr
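# Small worked example (a sketch, not part of the original module) of the complex-number
# shear addition rule used above (Schneider 2006 eq 12): a shear g_add applied on top of a
# prior shear g_prior gives g_final = (g_prior + g_add) / (1 + conj(g_add) * g_prior).
def _example_shear_addition():
    g_prior = 0.03 + 0.01j # e.g. an intrinsic ellipticity (values are arbitrary)
    g_add = -0.02 + 0.04j # e.g. a cosmological shear
    g_final = (g_prior + g_add) / (1. + g_add.conjugate() * g_prior)
    # For small |g| the result is close to, but not exactly, the naive sum g_prior + g_add
    return g_final.real, g_final.imag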
def q_constant(submission_file, experiment, obs_type, storage_dir=STORAGE_DIR, truth_dir=TRUTH_DIR,
logger=None, normalization=None, sigma2_min=None, just_q=False, cfid=CFID, mfid=MFID,
pretty_print=False, flip_g1=False, flip_g2=False, plot=False, ignore_fields=None):
"""Calculate the Q_c for a constant shear branch submission.
@param submission_file File containing the user submission.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth information for the challenge is
stored
@param logger Python logging.Logger instance, for message logging
@param normalization Normalization factor for the metric (default `None` uses either
`NORMALIZATION_CONSTANT_GROUND` or `NORMALIZATION_CONSTANT_SPACE`
depending on `obs_type`)
@param sigma2_min Damping term to put into the denominator of QZ1 metric (default `None`
uses either `SIGMA2_MIN_CONSTANT_GROUND` or `SIGMA2_MIN_CONSTANT_SPACE`
depending on `obs_type`)
@param just_q Set `just_q = True` (default is `False`) to only return Q_c rather than
the default behaviour of returning a tuple including best fitting c+,
m+, cx, mx, etc.
@param cfid Fiducial, target c value
@param mfid Fiducial, target m value
@param ignore_fields List or tuple of fields to ignore. If None, use all fields.
    @return The metric Q_c, & optionally best fitting c+, m+, cx, mx, sigc+, sigm+, sigcx, sigmx.
"""
if not os.path.isfile(submission_file):
raise ValueError("Supplied submission_file '"+submission_file+"' does not exist.")
# If the sigma2_min is not changed from None, set using defaults based on obs_type
if sigma2_min is None:
if obs_type == "ground":
sigma2_min = SIGMA2_MIN_CONSTANT_GROUND
elif obs_type == "space":
sigma2_min = SIGMA2_MIN_CONSTANT_SPACE
else:
raise ValueError("Default sigma2_min cannot be set as obs_type not recognised")
# If the normalization is not changed from None, set using defaults based on obs_type
if normalization is None:
if obs_type == "ground":
normalization = NORMALIZATION_CONSTANT_GROUND
elif obs_type == "space":
normalization = NORMALIZATION_CONSTANT_SPACE
else:
raise ValueError("Default sigma2_min cannot be set as obs_type not recognised")
# Load the submission and label the slices we're interested in
if logger is not None:
logger.info("Calculating Q_c metric for "+submission_file)
data = np.loadtxt(submission_file)
subfield = data[:, 0]
g1sub = data[:, 1]
g2sub = data[:, 2]
if flip_g1: g1sub = -g1sub
if flip_g2: g2sub = -g2sub
# Load up the rotations, then rotate g1 & g2 in the correct sense.
# NOTE THE MINUS SIGNS! This is because we need to rotate the coordinates *back* into a frame
# in which the primary direction of the PSF is g1, and the orthogonal is g2
try: # Put this in a try except block to handle funky submissions better
rotations = get_generate_const_rotations(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger)
g1srot = g1sub * np.cos(-2. * rotations) - g2sub * np.sin(-2. * rotations)
g2srot = g1sub * np.sin(-2. * rotations) + g2sub * np.cos(-2. * rotations)
# Load the truth
_, g1truth, g2truth = get_generate_const_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger)
# Rotate the truth in the same sense, then use the g3metrics.fitline routine to
# perform simple linear regression
g1trot = g1truth * np.cos(-2. * rotations) - g2truth * np.sin(-2. * rotations)
g2trot = g1truth * np.sin(-2. * rotations) + g2truth * np.cos(-2. * rotations)
# Decide which subfields to use / ignore
use = np.ones_like(g1srot, dtype=bool)
if ignore_fields is not None:
for field_index in ignore_fields:
use[field_index] = False
Q_c, c1, m1, c2, m2, sigc1, sigm1, sigc2, sigm2 = g3metrics.metricQZ1_const_shear(
g1srot[use], g2srot[use], g1trot[use], g2trot[use],
cfid=cfid, mfid=mfid, sigma2_min=sigma2_min)
Q_c *= normalization
if plot:
import matplotlib.pyplot as plt
plt.subplot(211)
plt.plot(g1trot, g1srot - g1trot, 'k+')
plt.plot(
[min(g1trot), max(g1trot)], [m1 * min(g1trot) + c1, m1 * max(g1trot) + c1],
'b-', label="c+ = %+.5f +/- %.5f \nm+ = %+.5f +/- %.5f" % (c1, sigc1, m1, sigm1))
plt.xlim()
plt.xlabel("gtrue_+")
plt.ylabel("(gsub - gtrue)_+")
plt.ylim(-0.015, 0.015)
plt.title(os.path.split(submission_file)[-1])
plt.axhline(ls='--', color='k')
plt.legend()
plt.subplot(212)
plt.plot(g2trot, g2srot - g2trot, 'k+')
plt.plot(
[min(g2trot), max(g2trot)], [m2 * min(g2trot) + c2, m2 * max(g2trot) + c2],
'r-', label="cx = %+.5f +/- %.5f \nmx = %+.5f +/- %.5f" % (c2, sigc2, m2, sigm2))
plt.xlabel("gtrue_x")
plt.ylabel("(gsub - gtrue)_x")
plt.ylim(-0.015, 0.015)
plt.axhline(ls='--', color='k')
plt.legend()
if type(plot) == str:
print "Saving plot to "+plot
plt.savefig(plot)
plt.show()
except Exception as err:
# Something went wrong... We'll handle this silently setting all outputs to zero but warn
# the user via any supplied logger; else raise
Q_c, c1, m1, c2, m2, sigc1, sigm1, sigc2, sigm2 = 0, 0, 0, 0, 0, 0, 0, 0, 0
print err
if logger is not None:
logger.warn(err.message)
else:
raise err # ...Raise exception if there is no logger
# Then return
if just_q:
ret = Q_c
else:
if pretty_print:
print
print "Evaluated results for submission "+str(submission_file)
print "Using sigma2_min = "+str(sigma2_min)
print
print "Q_c = %.4f" % Q_c
print "c+ = %+.5f +/- %.5f" % (c1, sigc1)
print "cx = %+.5f +/- %.5f" % (c2, sigc2)
print "m+ = %+.5f +/- %.5f" % (m1, sigm1)
print "mx = %+.5f +/- %.5f" % (m2, sigm2)
print
ret = (Q_c, c1, m1, c2, m2, sigc1, sigm1, sigc2, sigm2)
return ret
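# Illustrative sketch (not part of the original module) of scoring a constant shear branch
# submission with q_constant(). The submission path is hypothetical, and the call assumes
# the truth data have been unpacked under TRUTH_DIR as described at the top of this module.
def _example_score_constant_branch():
    results = q_constant(
        "./example_submission_control_ground_constant.asc", "control", "ground",
        pretty_print=True)
    Q_c = results[0] # remaining entries are c+, m+, cx, mx and their uncertainties
    return Q_c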
def q_variable(submission_file, experiment, obs_type, normalization=None, truth_dir=TRUTH_DIR,
storage_dir=STORAGE_DIR, logger=None, corr2_exec="corr2", poisson_weight=False,
usebins=USEBINS, fractional_diff=False, squared_diff=False, sigma2_min=None):
"""Calculate the Q_v for a variable shear branch submission.
@param submission_file File containing the user submission.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param normalization Normalization factor for the metric, default will be set differently for
obs_type='space' and obs_type='ground'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth information for the challenge is
stored
@param logger Python logging.Logger instance, for message logging
    @param corr2_exec       Path to Mike Jarvis' corr2 executable
@param poisson_weight If `True`, use the relative Poisson errors in each bin of map_E
to form an inverse variance weight for the difference metric
[default = `False`]
@param usebins An array the same shape as EXPECTED_THETA specifying which bins to
use in the calculation of Q_v [default = `USEBINS`]. If set to `None`,
uses all bins
@param fractional_diff Use a |fractional|, rather than absolute difference in metric
@param squared_diff Use the squared, rather than the absolute difference in metric
@param sigma2_min Damping term to put into the denominator of metric (default `None`
uses either `SIGMA2_MIN_VARIABLE_GROUND` or `SIGMA2_MIN_VARIABLE_SPACE`
depending on `obs_type`)
@return The metric Q_v
"""
if not os.path.isfile(submission_file):
raise ValueError("Supplied submission_file '"+submission_file+"' does not exist.")
# Set the default normalization based on whether ground or space data
if normalization is None:
if obs_type == "ground":
normalization = NORMALIZATION_VARIABLE_GROUND
elif obs_type == "space":
normalization = NORMALIZATION_VARIABLE_SPACE
else:
raise ValueError("Default normalization cannot be set as obs_type not recognised")
# If the sigma2_min is not changed from `None`, set using defaults based on `obs_type`
if sigma2_min is None:
if obs_type == "ground":
sigma2_min = SIGMA2_MIN_VARIABLE_GROUND
elif obs_type == "space":
sigma2_min = SIGMA2_MIN_VARIABLE_SPACE
else:
raise ValueError("Default sigma2_min cannot be set as obs_type not recognised")
# Load the submission and label the slices we're interested in
if logger is not None:
logger.info("Calculating Q_v metric for "+submission_file)
data = np.loadtxt(submission_file)
    # The submission must have at least 4 and at most 5 columns, so check for this
if data.shape not in ((NBINS_THETA * NFIELDS, 4), (NBINS_THETA * NFIELDS, 5)):
raise ValueError("Submission "+str(submission_file)+" is not the correct shape!")
# Extract the salient parts of the submission from data
field_sub = data[:, 0].astype(int)
theta_sub = data[:, 1]
map_E_sub = data[:, 2]
# Load/generate the truth shear signal
field_shear, theta_shear, map_E_shear, _, maperr_shear = get_generate_variable_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger,
corr2_exec=corr2_exec, mape_file_prefix=MAPESHEAR_FILE_PREFIX, suffixes=("",),
make_plots=False)
    # Then generate the intrinsic only map_E, useful for examining plots, including the maperr
# (a good estimate of the relative Poisson errors per bin) which we will use to provide a weight
field_int, theta_int, map_E_int, _, maperr_int = get_generate_variable_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger,
corr2_exec=corr2_exec, mape_file_prefix=MAPEINT_FILE_PREFIX, suffixes=("_intrinsic",),
make_plots=False)
# Then generate the theory observed = int + shear combined map signals - these are our reference
# Note this uses the new functionality of get_generate_variable_truth for adding shears
field_ref, theta_ref, map_E_ref, _, maperr_ref = get_generate_variable_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger,
corr2_exec=corr2_exec, mape_file_prefix=MAPEOBS_FILE_PREFIX,
file_prefixes=("galaxy_catalog", "galaxy_catalog"), suffixes=("_intrinsic", ""),
make_plots=False)
# Set up the weight
if poisson_weight:
weight = max(maperr_int**2) / maperr_int**2 # Inverse variance weight
else:
weight = np.ones_like(map_E_ref)
# Set up the usebins to use if `usebins == None` (use all bins)
if usebins is None:
usebins = np.repeat(True, NBINS_THETA * NFIELDS)
# Get the total number of active bins per field
nactive = sum(usebins) / NFIELDS
try: # Put this in a try except block to handle funky submissions better
        np.testing.assert_array_almost_equal( # Sanity check our truth / expected theta bins
theta_shear, EXPECTED_THETA, decimal=3,
err_msg="BIG SNAFU! Truth theta does not match the EXPECTED_THETA, failing...")
np.testing.assert_array_equal(
field_sub, field_ref, err_msg="User field array does not match truth.")
np.testing.assert_array_almost_equal(
theta_sub, theta_ref, decimal=3, err_msg="User theta array does not match truth.")
# The definition of Q_v is so simple there is no need to use the g3metrics version
# NOTE WE ARE TRYING A NEW DEFINITION OF Q_v THAT IS NOT SO SIMPLE
Q_v_fields = np.zeros(nactive) # To store diffs averaged over fields, per bin
if not fractional_diff:
for i in range(nactive): # Sum over all fields for each bin, nactive being the stride
Q_v_fields[i] = np.sum(((weight * (map_E_sub - map_E_ref))[usebins])[i::nactive])
else:
for i in range(nactive): # Sum over all fields for each bin, nactive being the stride
Q_v_fields[i] = np.sum(
((weight * (map_E_sub - map_E_ref) / map_E_ref)[usebins])[i::nactive])
# Then take the weighted average abs(Q_v_fields)
if not squared_diff:
Q_v = normalization / (
sigma2_min + (np.sum(np.abs(Q_v_fields)) / np.sum(weight[usebins])))
else:
Q_v = normalization / (
sigma2_min + (np.sum(Q_v_fields**2) / np.sum(weight[usebins])))
except Exception as err:
Q_v = 0. # If the theta or field do not match, let's be strict and force Q_v...
if logger is not None:
logger.warn(err.message) # ...But let's warn if there's a logger!
else: # ...And raise the exception if not
raise err
# Then return Q_v
return Q_v
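# Illustrative sketch (not part of the original module) of scoring a variable shear branch
# submission with q_variable(). The submission path is hypothetical; corr2 must be available
# (here assumed to be on the PATH) in case the truth aperture mass tables need to be built.
def _example_score_variable_branch():
    Q_v = q_variable(
        "./example_submission_control_ground_variable.asc", "control", "ground",
        corr2_exec="corr2")
    return Q_v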
def map_diff_func(cm_array, mapEsub, maperrsub, mapEref, mapEunitc):
"""Difference between an m,c model of a biased aperture mass statistic submission and the
submission itself, as a vector corresponding to the theta vector.
The model of the biased submission is simply:
mapEmodel = mapEunitc * c^2 + mapEref * (1 + 2 * m + m^2)
where c, m = cm_array[0], cm_array[1]. This code returns
(mapEmodel - mapEsub) / maperrsub
for the use of scipy.optimize.leastsq() within q_variable_by_mc().
"""
ret = (
mapEunitc * cm_array[0]**2 + mapEref * (1. + 2. * cm_array[1] + cm_array[1]**2)
- mapEsub) / maperrsub
return ret
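# Tiny numerical sketch (not part of the original module) of the bias model fitted in
# q_variable_by_mc() below: with c = 0 and m = 0 the model reduces to the reference signal,
# so map_diff_func() returns the error-normalised residuals of the submission itself.
def _example_map_diff_func():
    mapEref = np.array([1.e-6, 2.e-6, 3.e-6]) # arbitrary illustrative values
    mapEunitc = np.array([5.e-7, 5.e-7, 5.e-7])
    maperr = np.array([1.e-7, 1.e-7, 1.e-7])
    mapEsub = mapEref * 1.1 # a submission with a crude multiplicative-style bias
    resid = map_diff_func(np.array([0., 0.]), mapEsub, maperr, mapEref, mapEunitc)
    return resid # equals (mapEref - mapEsub) / maperr when c = m = 0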
def q_variable_by_mc(submission_file, experiment, obs_type, map_E_unitc, normalization=None,
truth_dir=TRUTH_DIR, storage_dir=STORAGE_DIR, logger=None, usebins=None,
corr2_exec="corr2", sigma2_min=None, cfid=CFID, mfid=MFID, just_q=False,
pretty_print=False):
"""Calculate the Q_v for a variable shear branch submission, using a best-fitting m and c model
of submission biases to evaluate the score. Experimental metric, not used in the GREAT3
challenge due to the difficulty of reliably modelling m & c in simulation tests.
@param submission_file File containing the user submission.
@param experiment Experiment for this branch, one of 'control', 'real_galaxy',
'variable_psf', 'multiepoch', 'full'
@param obs_type Observation type for this branch, one of 'ground' or 'space'
@param normalization Normalization factor for the metric, default will be set differently for
obs_type='space' and obs_type='ground'
@param storage_dir Directory from/into which to load/store rotation files
@param truth_dir Root directory in which the truth information for the challenge is
stored
@param logger Python logging.Logger instance, for message logging
@param usebins An array the same shape as EXPECTED_THETA specifying which bins to
use in the calculation of Q_v [default = `USEBINS`]. If set to `None`,
uses all bins
    @param corr2_exec       Path to Mike Jarvis' corr2 executable
@param sigma2_min Damping term to put into the denominator of metric (default `None`
uses either `SIGMA2_MIN_VARIABLE_GROUND` or `SIGMA2_MIN_VARIABLE_SPACE`
depending on `obs_type`)
@param cfid Fiducial, target c value
@param mfid Fiducial, target m value
@param just_q Set `just_q = True` (default is `False`) to only return Q_v rather than
the default behaviour of returning a tuple including best fitting |c|,
m, uncertainties etc.
@return The metric Q_v
"""
if not os.path.isfile(submission_file):
raise ValueError("Supplied submission_file '"+submission_file+"' does not exist.")
# Set the default normalization based on whether ground or space data
if normalization is None:
if obs_type == "ground":
normalization = NORMALIZATION_CONSTANT_GROUND
elif obs_type == "space":
normalization = NORMALIZATION_CONSTANT_SPACE
else:
raise ValueError("Default normalization cannot be set as obs_type not recognised")
# If the sigma2_min is not changed from `None`, set using defaults based on `obs_type`
if sigma2_min is None:
if obs_type == "ground":
sigma2_min = SIGMA2_MIN_VARIABLE_GROUND
elif obs_type == "space":
sigma2_min = SIGMA2_MIN_VARIABLE_SPACE
else:
raise ValueError("Default sigma2_min cannot be set as obs_type not recognised")
# Load the submission and label the slices we're interested in
if logger is not None:
logger.info("Calculating Q_v metric (by m & c) for "+submission_file)
data = np.loadtxt(submission_file)
    # The submission must have at least 4 and at most 5 columns, so check for this
if data.shape not in ((NBINS_THETA * NFIELDS, 4), (NBINS_THETA * NFIELDS, 5)):
raise ValueError("Submission "+str(submission_file)+" is not the correct shape!")
# Extract the salient parts of the submission from data
field_sub = data[:, 0].astype(int)
theta_sub = data[:, 1]
map_E_sub = data[:, 2]
# Load/generate the truth shear signal
field_shear, theta_shear, map_E_shear, _, maperr_shear = get_generate_variable_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger,
corr2_exec=corr2_exec, mape_file_prefix=MAPESHEAR_FILE_PREFIX, suffixes=("",),
make_plots=False)
    # Then generate the intrinsic only map_E, useful for examining plots, including the maperr
# (a good estimate of the relative Poisson errors per bin) which we will use to provide a weight
field_int, theta_int, map_E_int, _, maperr_int = get_generate_variable_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger,
corr2_exec=corr2_exec, mape_file_prefix=MAPEINT_FILE_PREFIX, suffixes=("_intrinsic",),
make_plots=False)
# Then generate the theory observed = int + shear combined map signals - these are our reference
# Note this uses the new functionality of get_generate_variable_truth for adding shears
field_ref, theta_ref, map_E_ref, _, maperr_ref = get_generate_variable_truth(
experiment, obs_type, truth_dir=truth_dir, storage_dir=storage_dir, logger=logger,
corr2_exec=corr2_exec, mape_file_prefix=MAPEOBS_FILE_PREFIX,
file_prefixes=("galaxy_catalog", "galaxy_catalog"), suffixes=("_intrinsic", ""),
make_plots=False)
# Set up the usebins to use if `usebins == None` (use all bins)
if usebins is None:
usebins = np.repeat(True, NBINS_THETA * NFIELDS)
# Get the total number of active bins per field
nactive = sum(usebins) / NFIELDS
if True: # Put this in a try except block to handle funky submissions better
        np.testing.assert_array_almost_equal( # Sanity check our truth / expected theta bins
theta_shear, EXPECTED_THETA, decimal=3,
err_msg="BIG SNAFU! Truth theta does not match the EXPECTED_THETA, failing...")
np.testing.assert_array_equal(
field_sub, field_ref, err_msg="User field array does not match truth.")
np.testing.assert_array_almost_equal(
theta_sub, theta_ref, decimal=3, err_msg="User theta array does not match truth.")
# Use optimize.leastsq to find the best fitting linear bias model params, and covariances
import scipy.optimize
optimize_results = scipy.optimize.leastsq(
map_diff_func, np.array([0., 0.]),
args=(
map_E_sub[usebins],
maperr_ref[usebins],
map_E_ref[usebins], # Note use of ref errors: this will appropriately
# weight different bins and is not itself noisy
map_E_unitc[usebins]), full_output=True)
csub = optimize_results[0][0]
msub = optimize_results[0][1]
map_E_model = map_E_unitc * csub**2 + map_E_ref * (1. + 2. * msub + msub**2)
residual_variance = np.var(
((map_E_sub - map_E_model) / maperr_ref)[usebins], ddof=1)
if optimize_results[1] is not None:
covcm = optimize_results[1] * residual_variance
sigcsub = np.sqrt(covcm[0, 0])
sigmsub = np.sqrt(covcm[1, 1])
covcm = covcm[0, 1]
else:
sigcsub = 0.
sigmsub = 0.
covcm = 0.
# Then we define the Q_v
Q_v = 2449. * normalization / np.sqrt(
(csub / cfid)**2 + (msub / mfid)**2 + sigma2_min)
try:
pass
except Exception as err:
Q_v = 0. # If the theta or field do not match, let's be strict and force Q_v...
if logger is not None:
logger.warn(err.message) # ...But let's warn if there's a logger!
else: # ...And raise the exception if not
raise err
    # Then return
if just_q:
ret = Q_v
else:
if pretty_print:
print "Evaluated results for submission "+str(submission_file)
print "Using sigma2_min = "+str(sigma2_min)
print "Q_v = %.4f" % Q_v
print "|c| = %+.5f +/- %.5f" % (csub, sigcsub)
print " m = %+.5f +/- %.5f" % (msub, sigmsub)
print "Cov(0, 0) = %+.4e" % sigcsub**2
print "Cov(0, 1) = %+.4e" % covcm
print "Cov(1, 1) = %+.4e" % sigmsub**2
ret = (Q_v, csub, msub, sigcsub, sigmsub, covcm)
return ret
| bsd-3-clause |
tdhopper/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    """Determine number of trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
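# Illustrative sketch (not part of the original module): with an inlier ratio of 0.8, a
# minimal sample size of 2 and 99% confidence, the rule above gives
# ceil(log(0.01) / log(1 - 0.8**2)) = 5 required trials.
def _example_dynamic_max_trials():
    return _dynamic_max_trials(n_inliers=80, n_samples=100, min_samples=2, probability=0.99)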
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly chosen sub-samples. Consider"
                " relaxing the constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
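# ---------------------------------------------------------------------------
# Illustrative usage sketch (an editorial addition, not part of the original
# module).  It assumes the estimator defined above is exposed as
# ``RANSACRegressor`` (with ``LinearRegression`` as the default base
# estimator, as in the code above); the synthetic data is invented purely for
# demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(0)
    # 100 points on y = 2 * x + 1 with small noise; the first 20 responses are
    # shifted far away so they act as gross outliers.
    X_demo = rng.uniform(-5, 5, size=(100, 1))
    y_demo = 2.0 * X_demo.ravel() + 1.0 + rng.normal(scale=0.2, size=100)
    y_demo[:20] += rng.uniform(20, 40, size=20)
    ransac = RANSACRegressor(random_state=0)
    ransac.fit(X_demo, y_demo)
    print("estimated slope: ", ransac.estimator_.coef_)
    print("trials used:     ", ransac.n_trials_)
    print("inlier fraction: ", ransac.inlier_mask_.mean())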
| bsd-3-clause |
dhermes/bezier | src/python/bezier/_plot_helpers.py | 1 | 3607 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting utilities."""
import numpy as np
from bezier import _helpers
def new_axis():
"""Get a new matplotlib axis.
Returns:
matplotlib.artist.Artist: A newly created axis.
"""
# NOTE: We import the plotting library at runtime to
# avoid the cost for users that only want to compute.
# The ``matplotlib`` import is a tad expensive.
import matplotlib.pyplot as plt # pylint: disable=import-outside-toplevel
figure = plt.figure()
return figure.gca()
def add_plot_boundary(ax, padding=0.125):
"""Add a buffer of empty space around a plot boundary.
.. note::
This only uses ``line`` data from the axis. It **could**
use ``patch`` data, but doesn't at this time.
Args:
ax (matplotlib.artist.Artist): A matplotlib axis.
padding (Optional[float]): Amount (as a fraction of width and height)
of padding to add around data. Defaults to ``0.125``.
"""
nodes = np.asfortranarray(
np.vstack([line.get_xydata() for line in ax.lines]).T
)
left, right, bottom, top = _helpers.bbox(nodes)
center_x = 0.5 * (right + left)
delta_x = right - left
center_y = 0.5 * (top + bottom)
delta_y = top - bottom
multiplier = (1.0 + padding) * 0.5
ax.set_xlim(
center_x - multiplier * delta_x, center_x + multiplier * delta_x
)
ax.set_ylim(
center_y - multiplier * delta_y, center_y + multiplier * delta_y
)
def add_patch(ax, color, pts_per_edge, *edges):
"""Add a polygonal surface patch to a plot.
Args:
ax (matplotlib.artist.Artist): A matplotlib axis.
color (Tuple[float, float, float]): Color as RGB profile.
pts_per_edge (int): Number of points to use in polygonal
approximation of edge.
edges (Tuple[~bezier.curve.Curve, ...]): Curved edges defining
a boundary.
"""
# pylint: disable=import-outside-toplevel
from matplotlib import patches
from matplotlib import path as _path_mod
# pylint: enable=import-outside-toplevel
s_vals = np.linspace(0.0, 1.0, pts_per_edge)
# Evaluate points on each edge.
all_points = []
for edge in edges:
points = edge.evaluate_multi(s_vals)
# We assume the edges overlap and leave out the first point
# in each.
all_points.append(points[:, 1:])
# Add first point as last point (polygon is closed).
first_edge = all_points[0]
all_points.append(first_edge[:, [0]])
# Add boundary first.
polygon = np.asfortranarray(np.hstack(all_points))
(line,) = ax.plot(polygon[0, :], polygon[1, :], color=color)
# Reset ``color`` in case it was ``None`` and set from color wheel.
color = line.get_color()
# ``polygon`` is stored Fortran-contiguous with ``x-y`` points in each
# column but ``Path()`` wants ``x-y`` points in each row.
path = _path_mod.Path(polygon.T)
patch = patches.PathPatch(
path, facecolor=color, edgecolor=color, alpha=0.625
)
ax.add_patch(patch)
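# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the original
# module): ``new_axis()`` and ``add_plot_boundary()`` only rely on matplotlib
# line data, so they can be exercised without any Bezier objects;
# ``add_patch()`` additionally needs curved edges providing
# ``evaluate_multi()``.  The output file name below is arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    ax = new_axis()
    theta = np.linspace(0.0, 2.0 * np.pi, 65)
    ax.plot(np.cos(theta), np.sin(theta))
    add_plot_boundary(ax, padding=0.25)
    ax.figure.savefig("plot_helpers_demo.png")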
| apache-2.0 |
tridesclous/tridesclous | tridesclous/gui/silhouette.py | 1 | 4432 | """
This view is taken from the sklearn examples.
See http://scikit-learn.org: plot-kmeans-silhouette-analysis-py
"""
from .myqt import QT
import pyqtgraph as pg
import numpy as np
import matplotlib.cm
import matplotlib.colors
from .base import WidgetBase
from .tools import ParamDialog
class MyViewBox(pg.ViewBox):
doubleclicked = QT.pyqtSignal()
def mouseDoubleClickEvent(self, ev):
self.doubleclicked.emit()
ev.accept()
def raiseContextMenu(self, ev):
        # for some reason enableMenu=False is not taken into account (bug ????)
pass
class Silhouette(WidgetBase):
"""
**Silhouette** display the silhouette score.
Implemented with sklearn. Must compute metrics first.
See:
* `Silhouette wikipedia <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
* `Silhouette sklearn <http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html#sphx-glr-auto-examples-cluster-plot-kmeans-silhouette-analysis-py>`_
"""
_params = [
]
def __init__(self, controller=None, parent=None):
WidgetBase.__init__(self, parent=parent, controller=controller)
self.layout = QT.QVBoxLayout()
self.setLayout(self.layout)
h = QT.QHBoxLayout()
self.layout.addLayout(h)
h.addWidget(QT.QLabel('<b>Silhouette</b>') )
but = QT.QPushButton('settings')
but.clicked.connect(self.open_settings)
h.addWidget(but)
self.graphicsview = pg.GraphicsView()
self.layout.addWidget(self.graphicsview)
self.alpha = 60
self.initialize_plot()
self.refresh()
def on_params_changed(self):
self.compute_slihouette()
self.refresh()
def initialize_plot(self):
self.viewBox = MyViewBox()
self.viewBox.doubleclicked.connect(self.open_settings)
self.viewBox.disableAutoRange()
self.plot = pg.PlotItem(viewBox=self.viewBox)
self.graphicsview.setCentralItem(self.plot)
self.plot.hideButtons()
def refresh(self):
self.plot.clear()
silhouette_values = self.controller.spike_silhouette
if silhouette_values is None:
return
if silhouette_values.shape != self.controller.spike_label.shape:
return
silhouette_avg = np.mean(silhouette_values)
silhouette_by_labels = {}
labels = self.controller.spike_label
labels_list = np.unique(labels)
for k in labels_list:
v = silhouette_values[k==labels]
v.sort()
silhouette_by_labels[k] = v
self.vline = pg.InfiniteLine(pos=silhouette_avg, angle = 90, movable = False, pen = '#FF0000')
self.plot.addItem(self.vline)
y_lower = 10
cluster_visible = self.controller.cluster_visible
visibles = [c for c, v in self.controller.cluster_visible.items() if v and c>=0]
for k in visibles:
if k not in silhouette_by_labels:
continue
v = silhouette_by_labels[k]
color = self.controller.qcolors[k]
color2 = QT.QColor(color)
color2.setAlpha(self.alpha)
y_upper = y_lower + v.size
y_vect = np.arange(y_lower, y_upper)
curve1 = pg.PlotCurveItem(np.zeros(v.size), y_vect, pen=color)
curve2 = pg.PlotCurveItem(v, y_vect, pen=color)
self.plot.addItem(curve1)
self.plot.addItem(curve2)
fill = pg.FillBetweenItem(curve1=curve1, curve2=curve2, brush=color2)
self.plot.addItem(fill)
txt = pg.TextItem( text='{}'.format(k), color='#FFFFFF', anchor=(0, 0.5), border=None)#, fill=pg.mkColor((128,128,128, 180)))
self.plot.addItem(txt)
txt.setPos(0, (y_upper+y_lower)/2.)
y_lower = y_upper + 10
self.plot.setXRange(-.5, 1.)
self.plot.setYRange(0,y_lower)
def on_spike_selection_changed(self):
pass
def on_spike_label_changed(self):
#~ self.compute_slihouette()
self.refresh()
def on_colors_changed(self):
self.refresh()
def on_cluster_visibility_changed(self):
self.refresh()
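# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the original
# widget): the values plotted above (``controller.spike_silhouette``) are
# per-spike silhouette scores.  They can be computed with scikit-learn, from
# which this view is adapted; the toy features below are invented.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    from sklearn.metrics import silhouette_samples, silhouette_score
    rng = np.random.RandomState(0)
    features = np.vstack([rng.normal(0.0, 1.0, size=(100, 2)),
                          rng.normal(4.0, 1.0, size=(100, 2))])
    labels = np.repeat([0, 1], 100)
    print('average silhouette:', silhouette_score(features, labels))
    print('per-spike values:', silhouette_samples(features, labels)[:5])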
| mit |
nwjs/chromium.src | tools/perf/cli_tools/soundwave/tables/timeseries_test.py | 10 | 9331 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import unittest
from cli_tools.soundwave import pandas_sqlite
from cli_tools.soundwave import tables
from core.external_modules import pandas
def SamplePoint(point_id, value, timestamp=None, missing_commit_pos=False):
"""Build a sample point as returned by timeseries2 API."""
revisions = {
'r_commit_pos': str(point_id),
'r_chromium': 'chromium@%d' % point_id,
}
annotations = {
'a_tracing_uri': 'http://example.com/trace/%d' % point_id
}
if timestamp is None:
timestamp = datetime.datetime.utcfromtimestamp(
1234567890 + 60 * point_id).isoformat()
if missing_commit_pos:
# Some data points have a missing commit position.
revisions['r_commit_pos'] = None
return [
point_id,
revisions,
value,
timestamp,
annotations,
]
class TestKey(unittest.TestCase):
def testKeyFromDict_typical(self):
key1 = tables.timeseries.Key.FromDict({
'test_suite': 'loading.mobile',
'bot': 'ChromiumPerf:android-nexus5',
'measurement': 'timeToFirstInteractive',
'test_case': 'Wikipedia'})
key2 = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='Wikipedia')
self.assertEqual(key1, key2)
def testKeyFromDict_defaultTestCase(self):
key1 = tables.timeseries.Key.FromDict({
'test_suite': 'loading.mobile',
'bot': 'ChromiumPerf:android-nexus5',
'measurement': 'timeToFirstInteractive'})
key2 = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='')
self.assertEqual(key1, key2)
def testKeyFromDict_invalidArgsRaises(self):
with self.assertRaises(TypeError):
tables.timeseries.Key.FromDict({
'test_suite': 'loading.mobile',
'bot': 'ChromiumPerf:android-nexus5'})
@unittest.skipIf(pandas is None, 'pandas not available')
class TestTimeSeries(unittest.TestCase):
def testDataFrameFromJsonV1(self):
test_path = ('ChromiumPerf/android-nexus5/loading.mobile'
'/timeToFirstInteractive/PageSet/Google')
data = {
'test_path': test_path,
'improvement_direction': 1,
'timeseries': [
['revision', 'value', 'timestamp', 'r_commit_pos', 'r_chromium'],
[547397, 2300.3, '2018-04-01T14:16:32.000', '547397', 'adb123'],
[547398, 2750.9, '2018-04-01T18:24:04.000', '547398', 'cde456'],
[547423, 2342.2, '2018-04-02T02:19:00.000', '547423', 'fab789'],
# Some timeseries have a missing commit position.
[547836, 2402.5, '2018-04-02T02:20:00.000', None, 'acf147'],
]
}
timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
# Check the integrity of the index: there should be no duplicates.
self.assertFalse(timeseries.index.duplicated().any())
self.assertEqual(len(timeseries), 4)
# Check values on the first point of the series.
point = timeseries.reset_index().iloc[0]
self.assertEqual(point['test_suite'], 'loading.mobile')
self.assertEqual(point['measurement'], 'timeToFirstInteractive')
self.assertEqual(point['bot'], 'ChromiumPerf/android-nexus5')
self.assertEqual(point['test_case'], 'PageSet/Google')
self.assertEqual(point['improvement_direction'], 'down')
self.assertEqual(point['point_id'], 547397)
self.assertEqual(point['value'], 2300.3)
self.assertEqual(point['timestamp'], datetime.datetime(
year=2018, month=4, day=1, hour=14, minute=16, second=32))
self.assertEqual(point['commit_pos'], 547397)
self.assertEqual(point['chromium_rev'], 'adb123')
self.assertEqual(point['clank_rev'], None)
def testDataFrameFromJsonV2(self):
test_path = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='Wikipedia')
data = {
'improvement_direction': 'down',
'units': 'ms',
'data': [
SamplePoint(547397, 2300.3, timestamp='2018-04-01T14:16:32.000'),
SamplePoint(547398, 2750.9),
SamplePoint(547423, 2342.2),
SamplePoint(547836, 2402.5, missing_commit_pos=True),
]
}
timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
# Check the integrity of the index: there should be no duplicates.
self.assertFalse(timeseries.index.duplicated().any())
self.assertEqual(len(timeseries), 4)
# Check values on the first point of the series.
point = timeseries.reset_index().iloc[0]
self.assertEqual(point['test_suite'], 'loading.mobile')
self.assertEqual(point['measurement'], 'timeToFirstInteractive')
self.assertEqual(point['bot'], 'ChromiumPerf:android-nexus5')
self.assertEqual(point['test_case'], 'Wikipedia')
self.assertEqual(point['improvement_direction'], 'down')
self.assertEqual(point['units'], 'ms')
self.assertEqual(point['point_id'], 547397)
self.assertEqual(point['value'], 2300.3)
self.assertEqual(point['timestamp'], datetime.datetime(
year=2018, month=4, day=1, hour=14, minute=16, second=32))
self.assertEqual(point['commit_pos'], 547397)
self.assertEqual(point['chromium_rev'], 'chromium@547397')
self.assertEqual(point['clank_rev'], None)
def testDataFrameFromJson_withSummaryMetric(self):
test_path = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='')
data = {
'improvement_direction': 'down',
'units': 'ms',
'data': [
SamplePoint(547397, 2300.3),
SamplePoint(547398, 2750.9),
],
}
timeseries = tables.timeseries.DataFrameFromJson(
test_path, data).reset_index()
self.assertTrue((timeseries['test_case'] == '').all())
def testGetTimeSeries(self):
test_path = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='Wikipedia')
data = {
'improvement_direction': 'down',
'units': 'ms',
'data': [
SamplePoint(547397, 2300.3),
SamplePoint(547398, 2750.9),
SamplePoint(547423, 2342.2),
]
}
timeseries_in = tables.timeseries.DataFrameFromJson(test_path, data)
with tables.DbSession(':memory:') as con:
pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries_in)
timeseries_out = tables.timeseries.GetTimeSeries(con, test_path)
# Both DataFrame's should be equal, except the one we get out of the db
# does not have an index defined.
timeseries_in = timeseries_in.reset_index()
self.assertTrue(timeseries_in.equals(timeseries_out))
def testGetTimeSeries_withSummaryMetric(self):
test_path = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='')
data = {
'improvement_direction': 'down',
'units': 'ms',
'data': [
SamplePoint(547397, 2300.3),
SamplePoint(547398, 2750.9),
SamplePoint(547423, 2342.2),
]
}
timeseries_in = tables.timeseries.DataFrameFromJson(test_path, data)
with tables.DbSession(':memory:') as con:
pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries_in)
timeseries_out = tables.timeseries.GetTimeSeries(con, test_path)
# Both DataFrame's should be equal, except the one we get out of the db
# does not have an index defined.
timeseries_in = timeseries_in.reset_index()
self.assertTrue(timeseries_in.equals(timeseries_out))
def testGetMostRecentPoint_success(self):
test_path = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='Wikipedia')
data = {
'improvement_direction': 'down',
'units': 'ms',
'data': [
SamplePoint(547397, 2300.3),
SamplePoint(547398, 2750.9),
SamplePoint(547423, 2342.2),
]
}
timeseries = tables.timeseries.DataFrameFromJson(test_path, data)
with tables.DbSession(':memory:') as con:
pandas_sqlite.InsertOrReplaceRecords(con, 'timeseries', timeseries)
point = tables.timeseries.GetMostRecentPoint(con, test_path)
self.assertEqual(point['point_id'], 547423)
def testGetMostRecentPoint_empty(self):
test_path = tables.timeseries.Key(
test_suite='loading.mobile',
measurement='timeToFirstInteractive',
bot='ChromiumPerf:android-nexus5',
test_case='Wikipedia')
with tables.DbSession(':memory:') as con:
point = tables.timeseries.GetMostRecentPoint(con, test_path)
self.assertIsNone(point)
| bsd-3-clause |
sanjayankur31/nest-simulator | pynest/examples/gif_population.py | 8 | 5045 | # -*- coding: utf-8 -*-
#
# gif_population.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Population of GIF neuron model with oscillatory behavior
--------------------------------------------------------
This script simulates a population of generalized integrate-and-fire (GIF)
model neurons driven by noise from a group of Poisson generators.
Due to spike-frequency adaptation, the GIF neurons tend to show oscillatory
behavior on the time scale comparable with the time constant of adaptation
elements (stc and sfa).
Population dynamics are visualized by raster plot and as average firing rate.
References
~~~~~~~~~~
.. [1] Schwalger T, Degert M, Gerstner W (2017). Towards a theory of cortical columns: From spiking
neurons to interacting neural populations of finite size. PLoS Comput Biol.
https://doi.org/10.1371/journal.pcbi.1005507
.. [2] Mensi S, Naud R, Pozzorini C, Avermann M, Petersen CC and
Gerstner W (2012). Parameter extraction and classification of
three cortical neuron types reveals two distinct adaptation
mechanisms. Journal of Neurophysiology. 107(6), pp.1756-1775.
"""
###############################################################################
# Import all necessary modules for simulation and plotting.
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1
simtime = 2000.0
###############################################################################
# Definition of neural parameters for the GIF model. These parameters are
# extracted by fitting the model to experimental data [2]_.
neuron_params = {"C_m": 83.1,
"g_L": 3.7,
"E_L": -67.0,
"Delta_V": 1.4,
"V_T_star": -39.6,
"t_ref": 4.0,
"V_reset": -36.7,
"lambda_0": 1.0,
"q_stc": [56.7, -6.9],
"tau_stc": [57.8, 218.2],
"q_sfa": [11.7, 1.8],
"tau_sfa": [53.8, 640.0],
"tau_syn_ex": 10.0,
}
###############################################################################
# Definition of the parameters for the population of GIF neurons.
N_ex = 100 # size of the population
p_ex = 0.3 # connection probability inside the population
w_ex = 30.0 # synaptic weights inside the population (pA)
###############################################################################
# Definition of the parameters for the Poisson group and its connection with
# GIF neurons population.
N_noise = 50 # size of Poisson group
rate_noise = 10.0 # firing rate of Poisson neurons (Hz)
w_noise = 20.0 # synaptic weights from Poisson to population neurons (pA)
###############################################################################
# Configuration of the simulation kernel with the previously defined time
# resolution.
nest.SetKernelStatus({"resolution": dt})
###############################################################################
# Building a population of GIF neurons, a group of Poisson neurons and a
# spike recorder device for capturing spike times of the population.
population = nest.Create("gif_psc_exp", N_ex, params=neuron_params)
noise = nest.Create("poisson_generator", N_noise, params={'rate': rate_noise})
spike_det = nest.Create("spike_recorder")
###############################################################################
# Build connections inside the population of GIF neurons population, between
# Poisson group and the population, and also connecting spike recorder to
# the population.
nest.Connect(
population, population, {'rule': 'pairwise_bernoulli', 'p': p_ex},
syn_spec={"weight": w_ex}
)
nest.Connect(noise, population, 'all_to_all', syn_spec={"weight": w_noise})
nest.Connect(population, spike_det)
###############################################################################
# Simulation of the network.
nest.Simulate(simtime)
###############################################################################
# Plotting the results of simulation including raster plot and histogram of
# population activity.
nest.raster_plot.from_device(spike_det, hist=True)
plt.title('Population dynamics')
plt.show()
| gpl-2.0 |
beepee14/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """These tests for LFW require medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
hjoliver/cylc | cylc/flow/main_loop/log_memory.py | 1 | 4691 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Log the memory usage of a running scheduler over time.
.. note::
This plugin is for Cylc developers debugging cylc memory usage.
For general interest memory measurement try
``/usr/bin/time -v cylc play`` or ``cylc play --profile``.
.. note::
Pympler associates memory with the first object which references it.
    In Cylc we have some objects (e.g. the configuration) which are referenced
from multiple places.
This can result in a certain amount of "jitter" in the results where
    pympler has swapped from associating memory with one object to another.
Watch out for matching increase/decrease in reported memory in
different objects.
.. warning::
This plugin can slow down a workflow significantly due to the
complexity of memory calculations.
Set a sensible interval before running workflows.
If ``matplotlib`` is installed this plugin will plot results as a PDF in
the run directory when the workflow is shut down (cleanly).
"""
import json
from pathlib import Path
from time import time
from cylc.flow.main_loop import (startup, shutdown, periodic)
try:
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
PLT = True
except ModuleNotFoundError:
PLT = False
from pympler.asizeof import asized
# TODO: make this configurable in the global config
MIN_SIZE = 10000
@startup
async def init(scheduler, state):
"""Take an initial memory snapshot."""
state['data'] = []
await take_snapshot(scheduler, state)
@periodic
async def take_snapshot(scheduler, state):
"""Take a memory snapshot"""
state['data'].append((
time(),
_compute_sizes(scheduler, min_size=MIN_SIZE)
))
@shutdown
async def report(scheduler, state):
"""Take a final memory snapshot and dump the results."""
await take_snapshot(scheduler, state)
_dump(state['data'], scheduler.workflow_run_dir)
fields, times = _transpose(state['data'])
_plot(
fields,
times,
scheduler.workflow_run_dir,
f'cylc.flow.scheduler.Scheduler attrs > {MIN_SIZE / 1000}kb'
)
def _compute_sizes(obj, min_size=10000):
"""Return the sizes of the attributes of an object."""
size = asized(obj, detail=2)
for ref in size.refs:
if ref.name == '__dict__':
break
else:
raise Exception('Cannot find __dict__ reference')
return {
item.name.split(':')[0][4:]: item.size
for item in ref.refs
if item.size > min_size
}
def _transpose(data):
"""Pivot data from snapshot to series oriented."""
all_keys = set()
for _, datum in data:
all_keys.update(datum.keys())
# sort keys by the size of the last checkpoint so that the fields
# get plotted from largest to smallest
all_keys = list(all_keys)
all_keys.sort(key=lambda x: data[-1][1].get(x, 0), reverse=True)
# extract data for each field, if not present
fields = {}
for key in all_keys:
fields[key] = [
datum.get(key, -1)
for _, datum in data
]
start_time = data[0][0]
times = [
timestamp - start_time
for timestamp, _ in data
]
return fields, times
def _dump(data, path):
json.dump(
data,
Path(path, f'{__name__}.json').open('w+')
)
return True
def _plot(fields, times, path, title='Objects'):
if (
not PLT
or len(times) < 2
):
return False
fig, ax1 = plt.subplots(figsize=(10, 7.5))
fig.suptitle(title)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Memory (kb)')
for key, sizes in fields.items():
ax1.plot(times, [x / 1000 for x in sizes], label=key)
ax1.legend(loc=0)
# start both axis at 0
ax1.set_xlim(0, ax1.get_xlim()[1])
ax1.set_ylim(0, ax1.get_ylim()[1])
plt.savefig(
Path(path, f'{__name__}.pdf')
)
return True
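# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the plugin): how
# ``_transpose`` pivots snapshot data.  The snapshot values below are
# fabricated; in the real plugin they come from ``_compute_sizes(scheduler)``.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    fake_data = [
        (100.0, {'pool': 50000, 'config': 30000}),
        (110.0, {'pool': 52000, 'config': 30000, 'data_store': 20000}),
    ]
    fields, times = _transpose(fake_data)
    print(times)   # [0.0, 10.0]: seconds since the first snapshot
    print(fields)  # per-attribute sizes, -1 where a snapshot lacked the key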
| gpl-3.0 |
DonBeo/statsmodels | statsmodels/tsa/statespace/tests/test_representation.py | 6 | 19651 | """
Tests for representation module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.model import Model
from .results import results_kalman_filter
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Construct the statespace representation
k_states = 4
self.model = Model(data['lgdp'], k_states=k_states, **kwargs)
self.model.design[:, :, 0] = [1, 1, 0, 0]
self.model.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters']
)
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
# Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[self.true['start']:].sum(),
self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.run_filter()
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.run_filter()
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.r_[self.model.endog[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.model.loglikelihood_burn = self.true['start']
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
k_states = 6
self.model = Model(data, k_states=k_states, **kwargs)
# Statespace representation
self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
self.model.transition[
([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])
] = [1, 1, 0, 0, 1, 1, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'],
)
self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.obs_cov[1, 1, 0] = sigma_ec**2
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
        # Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
# self.results.llf_obs[self.true['start']:].sum(),
self.results.llf_obs[0:].sum(),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.c_[
self.model.endog,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
# self.model.loglikelihood_burn = self.true['start']
self.model.loglikelihood_burn = 0
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][-1],
self.true_states.iloc[end-1, 3], 4
)
# Miscellaneous coverage-related tests
def test_slice_notation():
endog = np.arange(10)*1.0
mod = Model(endog, k_states=2)
# Test invalid __setitem__
def set_designs():
mod['designs'] = 1
def set_designs2():
mod['designs',0,0] = 1
def set_designs3():
mod[0] = 1
assert_raises(IndexError, set_designs)
assert_raises(IndexError, set_designs2)
assert_raises(IndexError, set_designs3)
# Test invalid __getitem__
assert_raises(IndexError, lambda: mod['designs'])
assert_raises(IndexError, lambda: mod['designs',0,0,0])
assert_raises(IndexError, lambda: mod[0])
# Test valid __setitem__, __getitem__
assert_equal(mod.design[0,0,0], 0)
mod['design',0,0,0] = 1
assert_equal(mod['design'].sum(), 1)
assert_equal(mod.design[0,0,0], 1)
assert_equal(mod['design',0,0,0], 1)
# Test valid __setitem__, __getitem__ with unspecified time index
mod['design'] = np.zeros(mod['design'].shape)
assert_equal(mod.design[0,0], 0)
mod['design',0,0] = 1
assert_equal(mod.design[0,0], 1)
assert_equal(mod['design',0,0], 1)
def test_representation():
# Test an invalid number of states
def zero_kstates():
mod = Representation(1, 0)
assert_raises(ValueError, zero_kstates)
# Test an invalid endogenous array
def empty_endog():
endog = np.zeros((0,0))
mod = Representation(endog, k_states=2)
assert_raises(ValueError, empty_endog)
# Test a Fortran-ordered endogenous array (which will be assumed to be in
# wide format: k_endog x nobs)
nobs = 10
k_endog = 2
endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog,nobs)*1.)
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test a C-ordered endogenous array (which will be assumed to be in
# tall format: nobs x k_endog)
nobs = 10
k_endog = 2
endog = np.arange(nobs*k_endog).reshape(nobs,k_endog)*1.
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test getting the statespace representation
assert_equal(mod._statespace, None)
mod._initialize_representation()
assert(mod._statespace is not None)
def test_bind():
mod = Representation(1, k_states=2)
# Test invalid endogenous array (it must be ndarray)
assert_raises(ValueError, lambda: mod.bind([1,2,3,4]))
# Test valid (nobs x 1) endogenous array
mod.bind(np.arange(10)*1.)
assert_equal(mod.nobs, 10)
# Test valid (k_endog x 0) endogenous array
mod.bind(np.zeros(0,dtype=np.float64))
# Test invalid (3-dim) endogenous array
assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2,2,3)*1.))
# Test valid F-contiguous
mod.bind(np.asfortranarray(np.arange(10).reshape(1,10)))
assert_equal(mod.nobs, 10)
# Test valid C-contiguous
mod.bind(np.arange(10).reshape(10,1))
assert_equal(mod.nobs, 10)
# Test invalid F-contiguous
assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10,1))))
# Test invalid C-contiguous
assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1,10)))
def test_initialization():
mod = Representation(1, k_states=2)
# Test invalid state initialization
assert_raises(RuntimeError, lambda: mod._initialize_state())
# Test valid initialization
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(2) * 3.
mod.initialize_known(initial_state, initial_state_cov)
assert_equal(mod._initial_state.sum(), 3)
assert_equal(mod._initial_state_cov.diagonal().sum(), 6)
# Test invalid initial_state
initial_state = np.zeros(10,)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
initial_state = np.zeros((10,10))
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
# Test invalid initial_state_cov
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(3)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
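# ---------------------------------------------------------------------------
# Illustrative sketch (an editorial addition, not part of the original test
# module): the smallest use of ``Model`` in the same style as the Clark
# examples above, a local level model y_t = mu_t + e_t, mu_t = mu_{t-1} + w_t
# with known variances.  The numbers are made up for demonstration and the
# helper is deliberately not named like a test so it is not collected.
# ---------------------------------------------------------------------------
def example_local_level_filter():
    nobs = 50
    rng = np.random.RandomState(1234)
    level = np.cumsum(rng.normal(scale=0.5, size=nobs))
    endog = level + rng.normal(scale=1.0, size=nobs)
    mod = Model(endog, k_states=1)
    mod.design[0, 0, 0] = 1.0      # y_t loads on the single state
    mod.transition[0, 0, 0] = 1.0  # the state follows a random walk
    mod.selection[0, 0, 0] = 1.0
    mod.obs_cov[0, 0, 0] = 1.0     # var(e_t)
    mod.state_cov[0, 0, 0] = 0.25  # var(w_t)
    mod.initialize_known(np.zeros(1), np.eye(1) * 10.0)
    res = mod.filter()
    return res.llf_obs.sum(), res.filtered_state[0]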
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e417.py | 2 | 6371 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
# max_input_power=100,
max_diff = 100,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=64,
# subsample_target=4,
include_diff=True,
include_power=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
# standardise_input=True,
# standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-6,
learning_rate_changes_by_iteration={
# 1000: 1e-4,
# 4000: 1e-5
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
# ReLU hidden layers
# linear output
# output one appliance
# 0% skip prob for first appliance
# 100% skip prob for other appliances
# input is diff
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': BidirectionalRecurrentLayer,
'num_units': 50,
'W_in_to_hid': Normal(std=1),
'W_hid_to_hid': Identity(scale=0.9),
'nonlinearity': rectify,
'learn_init': False,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Normal(std=1/sqrt(50))
}
]
net = Net(**net_dict_copy)
net.load_params(5000)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
# EXPERIMENTS = list('abcdefghi')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
h2educ/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information.)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
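# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the functional interface above on a tiny toy
# dataset; the labelling noted in the comments is what one would typically
# expect for these parameters, not a guarantee.
#
#   import numpy as np
#   from sklearn.cluster import dbscan
#
#   X = np.array([[1.0, 2.0], [1.1, 2.1], [0.9, 1.9],   # one dense blob
#                 [8.0, 8.0]])                           # one isolated point
#   core_samples, labels = dbscan(X, eps=0.5, min_samples=2)
#   # labels is typically array([ 0,  0,  0, -1]): the blob forms one cluster
#   # and the isolated point is marked as noise (-1).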
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
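# --- Illustrative usage sketch (not part of the original module) ---
# The estimator interface mirrors the dbscan() function above; a minimal,
# hedged example (attribute values are indicative, not guaranteed):
#
#   import numpy as np
#   from sklearn.cluster import DBSCAN
#
#   X = np.array([[1.0, 2.0], [1.1, 2.1], [0.9, 1.9], [8.0, 8.0]])
#   db = DBSCAN(eps=0.5, min_samples=2).fit(X)
#   db.labels_                # typically array([ 0,  0,  0, -1])
#   db.core_sample_indices_   # typically array([0, 1, 2])
#   db.components_            # copies of the three core samples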
| bsd-3-clause |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_console_config.py | 24 | 21691 | # Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of string where the first element is the
# command itself and reminders are the options for the command. Raw image data
# is given as STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporally file and the program is called with the temporally file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of string where the first element is the
# command itself and reminders are the options for the command. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
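# --- Illustrative note (not part of the original profile) ---
# Any of the commented defaults above can be activated by removing the
# leading '#' and assigning the desired value; for example, uncommenting
# "c.ZMQTerminalInteractiveShell.confirm_exit = True" and changing it to
# False would skip the exit confirmation prompt.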
| mit |
moschlar/SAUCE | sauce/controllers/similarity.py | 1 | 5600 | # -*- coding: utf-8 -*-
"""Similarity controller module
"""
#
## SAUCE - System for AUtomated Code Evaluation
## Copyright (C) 2013 Moritz Schlarb
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import matplotlib
matplotlib.use('Agg') # Only backend available in server environments
import pylab
from ripoff import all_pairs, dendrogram, distances
from itertools import combinations
from functools import partial
# turbogears imports
from tg import expose, abort, cache, tmpl_context as c, redirect
#from tg import redirect, validate, flash
# third party imports
#from tg.i18n import ugettext as _
#from repoze.what import predicates
import status
from repoze.what.predicates import Any, has_permission
from tw2.pygmentize import Pygmentize
# project specific imports
from sauce.lib.base import BaseController
from sauce.model import Submission
from sauce.lib.helpers import udiff
from sauce.lib.authz import user_is_in
from sauce.lib.menu import menu
from sauce.widgets import SourceDisplay
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
log = logging.getLogger(__name__)
similarity_combined = lambda a, b: distances.combined(a or u'', b or u'')
def rgb(v, cmap_name='RdYlGn'):
'''Get CSS rgb representation from color map with name'''
cmap = pylab.get_cmap(cmap_name)
(r, g, b, _) = cmap(v)
return 'rgb(' + ','.join('%d' % int(x * 255) for x in (r, g, b)) + ')'
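# Illustrative sketch (not part of the original module): with the default
# 'RdYlGn' colormap, rgb() maps low similarity values to red-ish CSS strings
# and high values to green-ish ones, e.g. roughly
#   rgb(0.0) -> 'rgb(165,0,38)'
#   rgb(1.0) -> 'rgb(0,104,55)'
# The exact integers depend on the matplotlib version, so treat them as
# indicative only.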
class SimilarityController(BaseController):
def __init__(self, assignment):
self.assignment = assignment
self.submissions = sorted((s for s in self.assignment.submissions if s.source),
key=lambda s: s.id)
self.key = str(self.assignment.id)
if self.submissions:
self.key += '_' + '-'.join(str(s.id) for s in self.submissions)
self.key += '_' + (max(self.submissions, key=lambda s: s.modified)
.modified.strftime('%Y-%m-%d-%H-%M-%S'))
self.allow_only = Any(
user_is_in('teachers', self.assignment.sheet.event),
user_is_in('tutors', self.assignment.sheet.event),
has_permission('manage'),
msg=u'You are not allowed to access this page.'
)
def _before(self, *args, **kwargs):
'''Prepare tmpl_context with navigation menus'''
c.sub_menu = menu(self.assignment)
c.source_display = SourceDisplay(mode='diff')
def get_similarity(self):
def calc():
log.debug('Calculating similarity matrix for key %s...', self.key)
return all_pairs([s.source for s in self.submissions])
simcache = cache.get_cache('similarity')
matrix = simcache.get_value(key=self.key, createfunc=calc, expiretime=7 * 24 * 60 * 60) # 7 days
return matrix
@expose()
def index(self, *args, **kwargs):
redirect(self.assignment.url + '/similarity/table', *args, **kwargs)
@expose('sauce.templates.similarity')
def table(self, cmap_name='RdYlGn', *args, **kwargs):
c.rgb = partial(rgb, cmap_name=cmap_name)
c.url = self.assignment.url + '/similarity'
matrix = self.get_similarity()
return dict(page='assignment', view='table',
assignment=self.assignment, matrix=matrix,
submissions=self.submissions)
@expose('sauce.templates.similarity')
def list(self, cmap_name='RdYlGn', *args, **kwargs):
c.rgb = partial(rgb, cmap_name=cmap_name)
c.url = self.assignment.url + '/similarity'
matrix = self.get_similarity()
l = sorted((((a, b), matrix[i, j])
for (i, a), (j, b) in combinations(enumerate(self.submissions), 2)),
key=lambda x: x[1])
return dict(page='assignment', view='list',
assignment=self.assignment,
submissions=self.submissions, l=l)
@expose(content_type="image/png")
def dendrogram(self, *args, **kwargs):
return dendrogram(self.get_similarity(),
leaf_label_func=lambda i: unicode(self.submissions[i].id),
leaf_rotation=45)
@expose('sauce.templates.similarity_diff')
def diff(self, *args, **kwargs):
c.rgb = rgb
try:
a = Submission.query.filter_by(id=int(args[0])).one()
b = Submission.query.filter_by(id=int(args[1])).one()
except ValueError:
abort(status.HTTP_400_BAD_REQUEST)
except IndexError:
abort(status.HTTP_400_BAD_REQUEST)
except NoResultFound:
abort(status.HTTP_404_NOT_FOUND)
except MultipleResultsFound: # pragma: no cover
log.warn('Database inconsistency', exc_info=True)
abort(status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
return dict(page='assignment', view='diff',
assignment=self.assignment, x=distances.combined(a.source or u'', b.source or u''),
a=a, b=b, source=udiff(a.source, b.source, unicode(a), unicode(b)))
| agpl-3.0 |
soylentdeen/BlurryApple | Alignment/kmirror.py | 1 | 10308 | import scipy
import numpy
import matplotlib.pyplot as pyplot
class Derotator( object ):
def __init__(self):
self.Z = 10.0 # mm
self.Y = 17.3 # mm
self.origin = Point(0.0, 0.0, 0.0)
self.theta = numpy.arctan2(self.Z, self.Y)
self.beta = numpy.pi/4.0 + self.theta/2.0
#print numpy.rad2deg(self.beta)
self.m1Point = Point(0.0, 0.0, self.Z)
self.m2Point = Point(0.0, self.Y, 0.0)
self.m3Point = Point(0.0, 0.0, -self.Z)
self.m1Normal = numpy.array([0.0, numpy.sin(self.beta), numpy.cos(self.beta)])
self.m2Normal = numpy.array([0.0, -1.0, 0.0])
self.m3Normal = numpy.array([0.0, numpy.sin(self.beta), -numpy.cos(self.beta)])
self.makeMirrors()
def makeMirrors(self):
self.mirror1 = Plane(self.m1Point, self.m1Normal)
self.mirror2 = Plane(self.m2Point, self.m2Normal)
self.mirror3 = Plane(self.m3Point, self.m3Normal)
def rotate(self, angle):
self.mirror1.rotate(self.origin, angle, numpy.array([0.0, 0.0, 1.0]))
self.mirror2.rotate(self.origin, angle, numpy.array([0.0, 0.0, 1.0]))
self.mirror3.rotate(self.origin, angle, numpy.array([0.0, 0.0, 1.0]))
def propogate(self, line):
r1 = self.mirror1.reflection(line)
r2 = self.mirror2.reflection(r1)
r3 = self.mirror3.reflection(r2)
return r3
def translate(self, dx, dy, dz):
shift = Point(dx, dy, dz)
self.origin = self.origin+shift
self.m1Point = self.m1Point+shift
self.m2Point = self.m2Point+shift
self.m3Point = self.m3Point+shift
self.makeMirrors()
def tiptilt(self, angle, axis):
ux = axis[0]
uy = axis[1]
uz = axis[2]
rotation_matrix = numpy.array([
[numpy.cos(angle) + ux**2*(1.0-numpy.cos(angle)),
ux*uy*(1.0-numpy.cos(angle)) - uz*numpy.sin(angle),
ux*uz*(1.0-numpy.cos(angle)) + uy*numpy.sin(angle)],
[ux*uy*(1.0-numpy.cos(angle))+uz*numpy.sin(angle),
numpy.cos(angle)+uy**2*(1.0-numpy.cos(angle)),
uy*uz*(1.0-numpy.cos(angle))-ux*numpy.sin(angle)],
[uz*ux*(1.0-numpy.cos(angle))-uy*numpy.sin(angle),
uz*uy*(1.0-numpy.cos(angle))+ux*numpy.sin(angle),
numpy.cos(angle)+uz**2*(1.0-numpy.cos(angle))]])
v1 = self.origin- self.m1Point
v1 = numpy.array([v1.x, v1.y, v1.z])
new_vector = numpy.dot(rotation_matrix, v1)
self.m1Normal = numpy.dot(rotation_matrix, self.m1Normal)
v1 = Point(new_vector[0], new_vector[1], new_vector[2])
self.m1Point = self.origin + v1
v2 = self.origin- self.m2Point
v2 = numpy.array([v2.x, v2.y, v2.z])
new_vector = numpy.dot(rotation_matrix, v2)
self.m2Normal = numpy.dot(rotation_matrix, self.m2Normal)
v2 = Point(new_vector[0], new_vector[1], new_vector[2])
self.m2Point = self.origin + v2
v3 = self.origin- self.m3Point
v3 = numpy.array([v3.x, v3.y, v3.z])
new_vector = numpy.dot(rotation_matrix, v3)
self.m3Normal = numpy.dot(rotation_matrix, self.m3Normal)
v3 = Point(new_vector[0], new_vector[1], new_vector[2])
self.m3Point = self.origin + v3
self.makeMirrors()
class Point( object ):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __add__(self, other):
return Point(self.x+other.x, self.y+other.y, self.z+other.z)
def __sub__(self, other):
return Point(self.x-other.x, self.y-other.y, self.z-other.z)
def __repr__(self):
return "x: "+str(self.x)+" y: "+str(self.y)+" z: "+str(self.z)
def __str__(self):
return "x: "+str(self.x)+" y: "+str(self.y)+" z: "+str(self.z)
class Line( object):
def __init__(self, p, slope):
norm = numpy.sqrt(slope[0]**2.0 + slope[1]**2.0 + slope[2]**2.0)
self.slope = slope/norm
self.a = slope[0]/norm
self.b = slope[1]/norm
self.c = slope[2]/norm
self.p = p
def traverse(self, t):
newx = self.p.x + self.a*t
newy = self.p.y + self.b*t
newz = self.p.z + self.c*t
newpoint = Point(newx, newy, newz)
return newpoint
class Plane( object ):
def __init__(self, p, normal):
self.p = p
self.normal = normal/numpy.sqrt(numpy.sum(numpy.square(normal)))
self.calculatePlaneEqn()
def rotate(self, p, angle, axis):
vector = numpy.array([p.x-self.p.x, p.y-self.p.y, p.z-self.p.z])
ux = axis[0]
uy = axis[1]
uz = axis[2]
rotation_matrix = numpy.array([
[numpy.cos(angle) + ux**2*(1.0-numpy.cos(angle)),
ux*uy*(1.0-numpy.cos(angle)) - uz*numpy.sin(angle),
ux*uz*(1.0-numpy.cos(angle)) + uy*numpy.sin(angle)],
[ux*uy*(1.0-numpy.cos(angle))+uz*numpy.sin(angle),
numpy.cos(angle)+uy**2*(1.0-numpy.cos(angle)),
uy*uz*(1.0-numpy.cos(angle))-ux*numpy.sin(angle)],
[uz*ux*(1.0-numpy.cos(angle))-uy*numpy.sin(angle),
uz*uy*(1.0-numpy.cos(angle))+ux*numpy.sin(angle),
numpy.cos(angle)+uz**2*(1.0-numpy.cos(angle))]])
new_vector = numpy.dot(rotation_matrix, vector)
self.p = Point(p.x-new_vector[0], p.y-new_vector[1], p.z-new_vector[2])
self.normal = numpy.dot(rotation_matrix, self.normal)
self.calculatePlaneEqn()
def calculatePlaneEqn(self):
Ax = self.normal[0]
Ay = self.normal[1]
Az = self.normal[2]
if (numpy.abs(Ax) < 1e-5) & (numpy.abs(Ay) < 1e-5):
self.coeffs = numpy.array([0.0, 0.0, 1.0/self.p.z])
elif (numpy.abs(Ax) < 1e-5) & (numpy.abs(Az) < 1e-5 ):
self.coeffs = numpy.array([0.0, 1.0/self.p.y, 0.0])
elif (numpy.abs(Ay) < 1e-5) & (numpy.abs(Az) < 1e-5):
self.coeffs = numpy.array([1.0/self.p.x, 0.0, 0.0])
elif (numpy.abs(Ax) < 1e-5):
p2 = Point(self.p.x, self.p.y+1.0, self.p.z-Ay/Az)
X = numpy.array([
[self.p.y, self.p.z],
[p2.y, p2.z]])
Y = numpy.array([1.0, 1.0])
BC = numpy.linalg.solve(X, Y)
self.coeffs = numpy.array([0.0, BC[0], BC[1]])
elif (numpy.abs(Ay) < 1e-5):
p2 = Point(self.p.x+1.0, self.p.y, self.p.z-Ax/Az)
X = numpy.array([
[self.p.x, self.p.z],
[p2.x, p2.z]])
Y = numpy.array([1.0, 1.0])
AC = numpy.linalg.solve(X, Y)
self.coeffs = numpy.array([AC[0], 0.0, AC[1]])
elif (numpy.abs(Az) < 1e-5):
p2 = Point(self.p.x-Ay/Ax, self.p.y+1.0, self.p.z)
X = numpy.array([
[self.p.x, self.p.y],
[p2.x, p2.y]])
Y = numpy.array([1.0, 1.0])
AB = numpy.linalg.solve(X,Y)
self.coeffs = numpy.array([AB[0], AB[1], 0.0])
else:
p2 = Point(self.p.x, self.p.y+1.0, self.p.z-Ay/Az)
p3 = Point(self.p.x+1.0, self.p.y, self.p.z-Ax/Az)
p4 = Point(self.p.x-Ay/Ax, self.p.y+1.0, self.p.z)
X = numpy.array([
[p2.x, p2.y, p2.z],
[p3.x, p3.y, p3.z],
[p4.x, p4.y, p4.z]])
Y = numpy.array([1.0, 1.0, 1.0])
self.coeffs = numpy.linalg.solve(X, Y)
def intersection(self, line):
dotproduct = numpy.dot(self.normal, line.slope)
if dotproduct == 0.0:
return False
else:
D = (self.coeffs[0]*line.p.x + self.coeffs[1]*line.p.y +
self.coeffs[2]*line.p.z)
t = (1.0-D)/(self.coeffs[0]*line.a + self.coeffs[1]*line.b +
self.coeffs[2]*line.c)
return line.traverse(t)
def reflection(self, line):
reflection = self.intersection(line)
if reflection:
dot = numpy.dot(self.normal, line.slope)
newx = line.a - 2*dot*self.normal[0]
newy = line.b - 2*dot*self.normal[1]
newz = line.c - 2*dot*self.normal[2]
newLine = Line(reflection, [newx, newy, newz])
return newLine
else:
print "Error! Line does not intersect plane!"
return False
origin = Point(0.0, 0.0, 0.0)
p1 = Point(-0.002, 0.0, 0.0)
p2 = Point(0.00, 0.5, 0.0)
p3 = Point(1.0, 1.0, 0.0)
derot = Derotator()
l0 = Line(origin, [0.0, 0.0, -1.0])
l1 = Line(p1, [0.0, 0.0, -1.0])
l2 = Line(p3, [0.0, 0.0, -1.0])
l3 = Line(p3, [0.0, 0.00028, -1.0])
l4 = Line(origin, [0.00028, 0.0, -1.0])
detectorPlane = Plane(Point(0.0, 0.0, -1000.0), numpy.array([0.0, 0.0, 1.0]))
pupilPlane = Plane(Point(0.0, 0.0, -500.0), numpy.array([0.0, 0.0, 1.0]))
nsteps = 51
dangle = 2.0*numpy.pi/nsteps
angle = []
a = []
b = []
c = []
d = []
e = []
theta = 0.0
derot.tiptilt(0.000278, numpy.array([0.0, 1.0, 0.0]))
#derot.rotate(numpy.deg2rad(45.0))
#spot = detectorPlane.intersection(derot.propogate(l2))
#print asdf
for i in range(nsteps):
angle.append(theta)
spot = detectorPlane.intersection(derot.propogate(l0))
a.append([spot.x, spot.y])
spot = detectorPlane.intersection(derot.propogate(l1))
b.append([spot.x, spot.y])
spot = detectorPlane.intersection(derot.propogate(l2))
c.append([spot.x, spot.y])
spot = detectorPlane.intersection(derot.propogate(l3))
d.append([spot.x, spot.y])
spot = detectorPlane.intersection(derot.propogate(l4))
e.append([spot.x, spot.y])
derot.rotate(dangle)
theta += dangle
a = numpy.array(a)
b = numpy.array(b)
c = numpy.array(c)
d = numpy.array(d)
e = numpy.array(e)
pyplot.ion()
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
fig.show()
#for image in zip(angle, a, b, c):
# print("Angle : %.2f" % numpy.rad2deg(image[0]))
# line = numpy.array(image[1:])
# ax.plot(line[:,0], line[:,1], marker= 'x')
# pyplot.draw()
# raw_input()
ax.plot(a[:,0], a[:,1], c='r', marker='o')
ax.plot(b[:,0], b[:,1], c='b', marker='x')
#ax.plot(c[:,0], c[:,1], c='g', marker='+')
#ax.plot(d[:,0], d[:,1], c='k', marker='.')
#ax.plot(e[:,0], e[:,1], c='m', marker='x')
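# --- Illustrative check (not part of the original script) ---
# A common sanity check for a K-mirror model like the one above is that the
# image rotates at roughly twice the mechanical rotation angle, so a
# 90-degree rotation of the derotator should carry an off-axis spot through
# roughly 180 degrees about the optical axis.  A minimal sketch reusing the
# classes above (results depend on the geometry constants Z and Y):
#
#   d = Derotator()
#   ray = Line(Point(1.0, 0.0, 0.0), [0.0, 0.0, -1.0])
#   before = detectorPlane.intersection(d.propogate(ray))
#   d.rotate(numpy.deg2rad(90.0))
#   after = detectorPlane.intersection(d.propogate(ray))
#   # 'before' and 'after' should sit roughly opposite each other about the axis.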
| gpl-2.0 |
almarklein/scikit-image | doc/examples/plot_denoise.py | 1 | 2065 | """
=============================
Denoising the picture of Lena
=============================
In this example, we denoise a noisy version of the picture of Lena using the
total variation and bilateral denoising filter.
These algorithms typically produce "posterized" images with flat domains
separated by sharp edges. It is possible to change the degree of posterization
by controlling the tradeoff between denoising and faithfulness to the original
image.
Total variation filter
----------------------
The result of this filter is an image that has a minimal total variation norm,
while being as close to the initial image as possible. The total variation is
the L1 norm of the gradient of the image.
Bilateral filter
----------------
A bilateral filter is an edge-preserving and noise reducing filter. It averages
pixels based on their spatial closeness and radiometric similarity.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.filter import denoise_tv_chambolle, denoise_bilateral
lena = img_as_float(data.lena())
lena = lena[220:300, 220:320]
noisy = lena + 0.6 * lena.std() * np.random.random(lena.shape)
noisy = np.clip(noisy, 0, 1)
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5))
plt.gray()
ax[0, 0].imshow(noisy)
ax[0, 0].axis('off')
ax[0, 0].set_title('noisy')
ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
ax[0, 1].axis('off')
ax[0, 1].set_title('TV')
ax[0, 2].imshow(denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15))
ax[0, 2].axis('off')
ax[0, 2].set_title('Bilateral')
ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
ax[1, 0].axis('off')
ax[1, 0].set_title('(more) TV')
ax[1, 1].imshow(denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15))
ax[1, 1].axis('off')
ax[1, 1].set_title('(more) Bilateral')
ax[1, 2].imshow(lena)
ax[1, 2].axis('off')
ax[1, 2].set_title('original')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
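# --- Illustrative follow-up (not part of the original example) ---
# One simple, hedged way to quantify the filters above is a plain
# mean-squared-error comparison against the clean image, using only numpy:
#
#   mse_noisy = np.mean((noisy - lena) ** 2)
#   denoised = denoise_tv_chambolle(noisy, weight=0.1, multichannel=True)
#   mse_tv = np.mean((denoised - lena) ** 2)
#   # mse_tv is typically noticeably smaller than mse_noisy here.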
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/setup.py | 69 | 3201 | import os
from os.path import join
import warnings
from sklearn._build_utils import maybe_cythonize_extensions
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension('_isotonic',
sources=['_isotonic.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
maybe_cythonize_extensions(top_path, config)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
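# --- Illustrative note (not part of the original file) ---
# This configuration is normally consumed through the project's top-level
# setup.py; building the compiled extensions in place usually looks roughly
# like
#
#   python setup.py build_ext --inplace
#
# run from the repository root (the exact invocation depends on the
# scikit-learn version and its build tooling).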
| bsd-3-clause |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
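# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the parser defined above; the dictionary
# layout follows the parse actions in FontconfigPatternParser:
#
#   props = parse_fontconfig_pattern("serif-12:bold:italic")
#   # props would typically be
#   # {'family': ['serif'], 'size': ['12'],
#   #  'weight': ['bold'], 'slant': ['italic']}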
| agpl-3.0 |
jcornford/pyecog | pyecog/ndf/make_pdfs.py | 1 | 5148 | '''
This module is not currently being used
'''
from __future__ import print_function
import pickle
import os
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.font_manager as fm
dir = os.path.dirname(__file__)
filename = os.path.join(dir, '../HelveticaNeue-Light.otf')
prop = fm.FontProperties(fname=filename)
def plot_traces(to_plot,
start = 0,
labels = None,
savepath = None,
filename = None,
format_string = ".pdf",
prob_thresholds = None, verbose = False):
'''
Args:
        to_plot: 2D array of traces, one trace per row
        start: offset added to the trace indices used in titles and annotations
        labels: if None, traces are all assigned label 1
        savepath: directory to save the figures into; if None, figures are shown
        filename: base name used for the saved files
        format_string: file extension for saved figures (e.g. ".pdf")
        prob_thresholds: if not None, used to color code the index (0-sure "k", 1-unsure "r")
    Returns:
        None
'''
if labels is None:
labels = np.ones(to_plot.shape[0])
if prob_thresholds is None:
prob_thresholds = np.zeros(to_plot.shape[0]).astype(int)
colors = ['b','r','g','k']
colors = ['k','r','g','b','purple']
print('Plotting traces...')
for section in range(int(np.ceil(to_plot.shape[0]/40.0))):
fig = plt.figure(figsize=(8.27, 11.69), dpi=20)
plt.axis('off')
plt.title('Traces '+ str((section+start)*40)+ ':' + str(((section+start)+1)*40)+' 1:black 2:red 3:green 4:blue', fontproperties=prop, fontsize = 14)
time = np.linspace(1,10,to_plot.shape[1])
annotation_colors = ['k','r']
if section == to_plot.shape[0]/40:
n_plots = to_plot.shape[0]%40
if verbose:
print(str(section*40)+ ' : ' + str(((section)*40)+n_plots))
else:
n_plots = 40
if verbose:
print(str(section*40)+ ' : ' + str((section+1)*40)+',')
for i in [ii + (section)*40 for ii in range(n_plots)]:
ax = fig.add_subplot(20,2,(i%40)+1)
ax.plot(time,to_plot[i,:], color = colors[int(labels[i])-1], linewidth = 0.5)
ax.annotate(str(i+start), xy = (0,0.3), fontsize = 10, color = annotation_colors[prob_thresholds[i]], fontproperties=prop)
ax.axis('off')
ax.set_xlim((0,10))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_yaxis().tick_left()
ax.get_xaxis().tick_bottom()
if savepath:
plt.savefig(os.path.join(savepath,filename+'_'+str(section+ (start+1)/40)+format_string))
#plt.savefig(os.path.join(savepath,filename+'_'+str(section)+format_string))
else:
plt.show()
plt.close('all')
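# Illustrative use of plot_traces (a sketch with hypothetical data; not part of
# the original module):
#     data = np.random.randn(80, 2560)           # 80 traces, 2560 samples each
#     labels = np.random.randint(1, 5, size=80)  # classes 1-4 pick the line colour
#     plot_traces(data, labels=labels, savepath='plots', filename='traces')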
def plot_traces_hdf5(to_plot,
labels = None,
savepath = None,
filename = None,
format_string = ".pdf",
prob_thresholds = None,
trace_len_sec = 5,
verbose = False):
if not format_string.startswith('.'):
format_string = '.'+format_string
if labels is None:
labels = np.ones(to_plot.shape[0])
if prob_thresholds is None:
prob_thresholds = np.zeros(to_plot.shape[0]).astype(int)
colors = ['k','r','g','b','purple']
print('Plotting traces...')
for section in range(int(np.ceil(to_plot.shape[0]/40.0))):
fig = plt.figure(figsize=(8.27, 11.69), dpi=20)
plt.axis('off')
plt.title('Seconds '+ str(section*40*trace_len_sec)+ ':' + str((section+1)*40*trace_len_sec), fontsize = 14,fontproperties=prop)
time = np.linspace(1,10,to_plot.shape[1])
annotation_colors = ['k','r']
if section == to_plot.shape[0]/40:
n_plots = to_plot.shape[0]%40
if verbose:
print(str(section*40)+ ' : ' + str(((section)*40)+n_plots))
else:
n_plots = 40
if verbose:
print(str(section*40)+ ' : ' + str((section+1)*40)+',')
for i in [ii + (section)*40 for ii in range(n_plots)]:
ax = fig.add_subplot(20,2,(i%40)+1)
ax.annotate(str(i), xy = (0,0.5), fontsize = 10,color = 'black', fontproperties=prop)
ax.plot(time, to_plot[i,:], color = colors[int(labels[i])], linewidth = 0.5)
ax.set_title(str(i*trace_len_sec), fontsize = 8, fontproperties=prop)
ax.axis('off')
ax.set_xlim((0,10))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_yaxis().tick_left()
ax.get_xaxis().tick_bottom()
if savepath:
plt.savefig(os.path.join(savepath,filename+'_'+str(section)+format_string))
plt.close('all')
print('Done')
if __name__ == "__main__":
    # choose one of the two pickled (labels, traces) tuples to plot
    #training_tuple = pickle.load(open('../training_label_traces_tuple','rb'))
    training_tuple = pickle.load(open('../validation_label_traces_tuple','rb'))
labels = training_tuple[0]
data = training_tuple[1]
    print('plotting ' + str(data.shape[0]) + ' traces')
    plot_traces(data, labels=labels, savepath='../', filename='validation', format_string='.pdf') | mit |
giumas/python-acoustics | tests/test_imaging.py | 3 | 1189 | import numpy as np
import pytest
has_matplotlib = pytest.importorskip("matplotlib")
if has_matplotlib:
from acoustics.bands import octave, third
from acoustics.imaging import plot_octave, plot_third, plot_bands
def setup_module(imaging):
imaging.octaves = octave(16, 16000)
imaging.thirds = third(63, 8000)
imaging.tl_oct = np.array([3, 4, 5, 12, 15, 24, 28, 23, 35, 45, 55])
imaging.tl_third = np.array([0, 0, 0, 1, 1, 2, 3, 5, 8, 13, 21,
32, 41, 47, 46, 44, 58, 77, 61, 75, 56, 54])
imaging.title = 'Title'
imaging.label = 'Label'
def test_plot_octave():
plot_octave(tl_oct, octaves)
def test_plot_octave_kHz():
plot_octave(tl_oct, octaves, kHz=True, xlabel=label, ylabel=label,
title=title, separator='.')
def test_plot_third_octave():
plot_third(tl_third, thirds, marker='s', separator=',')
def test_plot_third_octave_kHz():
plot_third(tl_third, thirds, marker='s', kHz=True, xlabel=label,
ylabel=label, title=title)
def test_plot_band_oct():
plot_bands(tl_oct, octaves, axes=None, band_type='octave')
def teardown_module(imaging):
pass
| bsd-3-clause |
semio/zipline | zipline/protocol.py | 15 | 17562 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
from pandas.tseries.tools import normalize_date
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
def dividend_payment(data=None):
"""
    Take a dictionary whose keys are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
PerformanceTracker.update_dividends. They are guaranteed to be unique
    integers within the context of a single simulation. If @data is non-empty, an
    id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be an asset identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
by function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
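# Illustrative cash-dividend payment (hypothetical values; not part of the
# original module):
#     s = dividend_payment({'id': 1, 'payment_sid': 8554,
#                           'cash_amount': 0.35, 'share_count': 0})
#     s.name    # -> 1, so concatenating such Series gives a DataFrame indexed by id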
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
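# Minimal illustration of the dict-like Event interface defined above (a
# sketch; the field names are hypothetical):
#     evt = Event({'sid': 24, 'price': 10.0})
#     evt['volume'] = 100      # __setitem__
#     'price' in evt           # __contains__ -> True
#     evt.to_series()          # pd.Series of the event's fields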
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
# Have to convert to primitive dict
state_dict['positions'] = dict(self.positions)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Portfolio saved state is too old.")
self.positions = Positions()
self.positions.update(state.pop('positions'))
self.__dict__.update(state)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Account saved state is too old.")
self.__dict__.update(state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
def __getstate__(self):
state_dict = copy(self.__dict__)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Protocol Position saved state is too old.")
self.__dict__.update(state)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
class SIDData(object):
# Cache some data on the class so that this is shared for all instances of
# siddata.
# The dt where we cached the history.
_history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
# most data we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
# the number of minutes that we need to pass to the bar count of history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward-defining the attributes needed, we
        # need to account for their entries in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price', raw=False):
"""
Gets the result of history for the given number of bars and field.
This will cache the results internally.
"""
cls = self.__class__
algo = get_algo_instance()
now = algo.datetime
if now != cls._history_cache_dt:
# For a given dt, the history call for this field will not change.
# We have a new dt, so we should reset the cache.
cls._history_cache_dt = now
cls._history_cache = {}
if field not in self._history_cache \
or bars > len(cls._history_cache[field][0].index):
# If we have never cached this field OR the amount of bars that we
# need for this field is greater than the amount we have cached,
# then we need to get more history.
hst = algo.history(
bars, self._freqstr, field, ffill=True,
)
# Assert that the column holds ints, not security objects.
if not isinstance(self._sid, str):
hst.columns = hst.columns.astype(int)
self._history_cache[field] = (hst, hst.values, hst.columns)
        # Slice off only the bars needed. This is because we store the LARGEST
# amount of history for the field, and we might request less than the
# largest from the cache.
buffer_, values, columns = cls._history_cache[field]
if raw:
sid_index = columns.get_loc(self._sid)
return values[-bars:, sid_index]
else:
return buffer_[self._sid][-bars:]
def _get_bars(self, days):
"""
Gets the number of bars needed for the current number of days.
Figures this out based on the algo datafrequency and caches the result.
This caches the result by replacing this function on the object.
This means that after the first call to _get_bars, this method will
point to a new function object.
"""
def daily_get_max_bars(days):
return days
def minute_get_max_bars(days):
            # max number of minutes, regardless of current days or short
# sessions
return days * 390
def daily_get_bars(days):
return days
@with_environment()
def minute_get_bars(days, env=None):
cls = self.__class__
now = get_algo_instance().datetime
if now != cls._minute_bar_cache_dt:
cls._minute_bar_cache_dt = now
cls._minute_bar_cache = {}
if days not in cls._minute_bar_cache:
# Cache this calculation to happen once per bar, even if we
# use another transform with the same number of days.
prev = env.previous_trading_day(now)
ds = env.days_in_range(
env.add_trading_days(-days + 2, prev),
prev,
)
# compute the number of minutes in the (days - 1) days before
# today.
            # 210 minutes in an early close and 390 in a full day.
ms = sum(210 if d in env.early_closes else 390 for d in ds)
# Add the number of minutes for today.
ms += int(
(now - env.get_open_and_close(now)[0]).total_seconds() / 60
)
cls._minute_bar_cache[days] = ms + 1 # Account for this minute
return cls._minute_bar_cache[days]
if get_algo_instance().sim_params.data_frequency == 'daily':
self._freqstr = '1d'
# update this method to point to the daily variant.
self._get_bars = daily_get_bars
self._get_max_bars = daily_get_max_bars
else:
self._freqstr = '1m'
# update this method to point to the minute variant.
self._get_bars = minute_get_bars
self._get_max_bars = minute_get_max_bars
# Not actually recursive because we have already cached the new method.
return self._get_bars(days)
def mavg(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanmean(prices)
def stddev(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
return nanstd(prices, ddof=1)
def vwap(self, days):
bars = self._get_bars(days)
max_bars = self._get_max_bars(days)
prices = self._get_buffer(max_bars, raw=True)[-bars:]
vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]
vol_sum = nansum(vols)
try:
ret = nansum(prices * vols) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
def returns(self):
algo = get_algo_instance()
now = algo.datetime
if now != self._returns_cache_dt:
self._returns_cache_dt = now
self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
hst = self._returns_cache[self._sid]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
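# Sketch of how the rolling transforms above are typically reached from an
# algorithm's handle_data (names here are illustrative, not defined in this
# module):
#     def handle_data(context, data):
#         sid_data = data[context.my_sid]
#         ma = sid_data.mavg(10)      # 10-day moving average of price
#         sd = sid_data.stddev(10)    # 10-day standard deviation
#         vw = sid_data.vwap(5)       # 5-day volume-weighted average price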
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
    Note: many methods are analogues of dictionary methods, kept for
    historical reasons (this class replaced a dictionary subclass).
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
self._factor_matrix = None
self._factor_matrix_expires = pd.Timestamp(0, tz='UTC')
@property
def factors(self):
algo = get_algo_instance()
today = normalize_date(algo.get_datetime())
if today > self._factor_matrix_expires:
self._factor_matrix, self._factor_matrix_expires = \
algo.compute_factor_matrix(today)
return self._factor_matrix.loc[today]
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
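# BarData behaves like a read-mostly mapping of sid -> SIDData, so inside
# handle_data one can, for example, iterate it directly (illustrative sketch;
# the available fields depend on the events present for that bar):
#     for sid in data:
#         last_price = data[sid].price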
| apache-2.0 |
tdhopper/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
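# For reference (illustrative): _hungarian returns the (row, col) index pairs of
# the minimum-cost assignment, e.g. for [[4, 1], [2, 8]] it selects (0, 1) and
# (1, 0) for a total cost of 3.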
| bsd-3-clause |
boland1992/seissuite_iran | bin/combination_stack.py | 2 | 26624 | #!/usr/bin/python -u
"""
This script reads seismic waveform data from a set of stations, and
calculates the cross-correlations between all pairs of stations. The
data (in miniseed format) must be located in folder *MSEED_DIR*. The
stations information (coordinates, instrument response) can be read
from dataless seed files (if *USE_DATALESSPAZ* = True) located in
folder *DATALESS_DIR*, and/or stationXML files (if *USE_STATIONXML* =
True) located in folder *STATIONXML_DIR*. Note that two different
stations MUST HAVE DIFFERENT NAMES, even if they do not belong to
the same network. Also, one given station cannot have several
sets of coordinates: if so, it will be skipped.
In the current version of the program, miniseed files MUST be
organized inside their directory as:
<year>-<month>/<network>.<station>.<channel>.mseed, e.g.:
1988-10/BL.JFOB.BHZ.mseed
So, there is one sub-directory per month, and inside it, one miniseed
file per month and per station.
The implemented algorithm follows the lines of Bensen et al.,
"Processing seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
The procedure consists in stacking daily cross-correlations between
pairs of stations, from *FIRSTDAY* to *LASTDAY* and, in each given day,
rejecting stations whose data fill is < *MINFILL*. Define a subset of
stations to cross-correlate in *CROSSCORR_STATIONS_SUBSET* (or let it
empty to cross-correlate all stations). Define a list of locations to
skip in *CROSSCORR_SKIPLOCS*, if any. The cross-correlations are
calculated between -/+ *CROSSCORR_TMAX* seconds.
Several pre-processing steps are applied to the daily seismic waveform
data, before the daily cross-correlation is calculated and stacked:
(1) removal of the instrument response, the mean and the trend;
(2) band-pass filter between *PERIODMIN* and *PERIODMAX* sec
(3) down-sampling to sampling step = *PERIOD_RESAMPLE* sec
(4) time-normalization:
- if *ONEBIT_NORM* = False, normalization of the signal by its
(smoothed) absolute amplitude in the earthquake period band,
      defined as *PERIODMIN_EARTHQUAKE* - *PERIODMAX_EARTHQUAKE* sec.
      The smoothing window is *PERIODMAX_EARTHQUAKE* / 2;
    - if *ONEBIT_NORM* = True, one-bit normalization, wherein
only the sign of the signal is kept (+1 or -1);
(5) spectral whitening of the Fourier amplitude spectrum: the Fourier
amplitude spectrum of the signal is divided by a smoothed version
    of itself. The smoothing window is *WINDOW_FREQ*.
Note that all the parameters mentioned above are defined in the
configuration file.
When all the cross-correlations are calculated, the script exports
several files in dir *CROSS*
"""
import os
import warnings
import datetime as dt
import itertools as it
import pickle
import obspy.signal.cross_correlation
import time
import glob
import sqlite3 as lite
import shutil
import numpy as np
import matplotlib.pyplot as plt
# set epoch timestamp
epoch = dt.datetime(1970, 1, 1)
total_verbose = True
psd = False
# DECLUSTER STATIONS!
# remove stations that are too close to one another (set by degree radius!)
#from seissuite.spacing.search_station import Coordinates
#import matplotlib.pyplot as plt
#import numpy as np
# turn on multiprocessing to get one merged trace per station?
# to preprocess trace? to stack cross-correlations?
MULTIPROCESSING = {'merge trace': False,
'process trace': False,
'cross-corr': False}
# how many concurrent processes? (set None to let multiprocessing module decide)
NB_PROCESSES = None
if any(MULTIPROCESSING.values()):
import multiprocessing as mp
# create a list of configuration files that will be iterated over!
# must be 1 or more
from seissuite.ant.psconfig import (create_config_list, run_config,
remove_config)
config_list = create_config_list()
total_time0 = dt.datetime.now()
for config_file in config_list:
# global variables MUST be defined
# with the function in the seissuite.ant.psconfig module
run_config(config_file)
from seissuite.ant import (pscrosscorr, psstation, pspreprocess, pserrors,
psstationSQL)
# import CONFIG class initalised in ./configs/tmp_config.pickle
config_pickle = 'configs/tmp_config.pickle'
f = open(name=config_pickle, mode='rb')
CONFIG = pickle.load(f)
f.close()
# import variables from initialised CONFIG class.
MSEED_DIR = CONFIG.MSEED_DIR
DATABASE_DIR = CONFIG.DATABASE_DIR
DATALESS_DIR = CONFIG.DATALESS_DIR
STATIONXML_DIR = CONFIG.STATIONXML_DIR
CROSSCORR_DIR = CONFIG.CROSSCORR_DIR
USE_DATALESSPAZ = CONFIG.USE_DATALESSPAZ
USE_STATIONXML = CONFIG.USE_STATIONXML
CROSSCORR_STATIONS_SUBSET = CONFIG.CROSSCORR_STATIONS_SUBSET
CROSSCORR_SKIPLOCS = CONFIG.CROSSCORR_SKIPLOCS
FIRSTDAY = CONFIG.FIRSTDAY
LASTDAY = CONFIG.LASTDAY
MINFILL = CONFIG.MINFILL
FREQMIN = CONFIG.FREQMIN
FREQMAX = CONFIG.FREQMAX
CORNERS = CONFIG.CORNERS
ZEROPHASE = CONFIG.ZEROPHASE
PERIOD_RESAMPLE = CONFIG.PERIOD_RESAMPLE
ONEBIT_NORM = CONFIG.ONEBIT_NORM
FREQMIN_EARTHQUAKE = CONFIG.FREQMIN_EARTHQUAKE
FREQMAX_EARTHQUAKE = CONFIG.FREQMAX_EARTHQUAKE
WINDOW_TIME = CONFIG.WINDOW_TIME
WINDOW_FREQ = CONFIG.WINDOW_FREQ
XCORR_INTERVAL = CONFIG.XCORR_INTERVAL
CROSSCORR_TMAX = CONFIG.CROSSCORR_TMAX
PLOT_CLASSIC = CONFIG.PLOT_CLASSIC
PLOT_DISTANCE = CONFIG.PLOT_DISTANCE
MAX_DISTANCE = CONFIG.MAX_DISTANCE
RESP_REMOVE = CONFIG.RESP_REMOVE
FULL_COMB = CONFIG.FULL_COMB
# initialise the required databases if they haven't already been.
#if no two SQL databases exist, then create them!
TIMELINE_DB = os.path.join(DATABASE_DIR, 'timeline.db')
RESP_DB = os.path.join(DATABASE_DIR, 'response.db')
# RESP_DB = os.path.join(DATABASE_DIR, 'response.db')
# if not os.path.exists(RESP_DB):
# initialise response database for use with automated data selection!
# lite.connect(RESP_DB)
# from seissuite.database import response_database
print TIMELINE_DB
if not os.path.exists(RESP_DB):
lite.connect(RESP_DB)
print "\nCreating response database. Please be patient ... "
from seissuite.database import response_database
if not os.path.exists(TIMELINE_DB):
# initialise timeline database to help the application find files!
lite.connect(TIMELINE_DB)
print "\nCreating timeline database. Please be patient ... "
from seissuite.database import create_database
if psd:
import powerdensity
print "\nProcessing parameters:"
print "- dir of miniseed data: " + MSEED_DIR
print "- dir of dataless seed data: " + DATALESS_DIR
print "- dir of stationXML data: " + STATIONXML_DIR
print "- output dir: " + CROSSCORR_DIR
print "- cross-correlation length (mins): " + str(XCORR_INTERVAL)
print "- cross-correlation maximum time interval (s): " + str(CROSSCORR_TMAX)
print "- band-pass: {:.1f}-{:.1f} s".format(1.0 / FREQMAX, 1.0 / FREQMIN)
if ONEBIT_NORM:
print "- normalization in time-domain: one-bit normalization"
else:
s = ("- normalisation in time-domain: "
"running normalisation in earthquake band ({:.1f}-{:.1f} s)")
print s.format(1.0 / FREQMAX_EARTHQUAKE, 1.0 / FREQMIN_EARTHQUAKE)
fmt = '%d/%m/%Y'
s = "- cross-correlation will be stacked between {}-{}"
print s.format(FIRSTDAY.strftime(fmt), LASTDAY.strftime(fmt))
subset = CROSSCORR_STATIONS_SUBSET
if subset:
print " for stations: {}".format(', '.join(subset))
print
# Initializing collection of cross-correlations
xc = pscrosscorr.CrossCorrelationCollection()
#create a metadata list, may need dictionary based on how much info required
metadata = []
#ask if system has crashed or stopped before another process was finished?
print "\nScanning for partial pickle cross-correlation files ... "
#maybe create pause statement for interesting load menu.
# loading cross-correlations (looking for *.part.pickle files in folders in
#in dir *CROSSCORR_DIR*)
folder_list = sorted(glob.glob(os.path.join(CROSSCORR_DIR, '*')))
pickle_list = []
index = 0 #creating index for call
for folder in folder_list:
#check to see if there are any pickle files in the xcorr time folder
if len(glob.glob(os.path.join(folder, '*.part.pickle'))) < 1:
#print("There are no .pickle files in this folder. Skipping ...")
continue
else:
#append name of pickle file path location string to pickle_list
pickle_list.append(glob.glob(os.path.join(folder, \
'*.part.pickle'))[0])
if len(pickle_list) < 1:
print("\nThere are no partial pickle files to begin again from.")
print("\nThe program will start from the beginning")
res = ""
else:
print "\nPlease choose a file to begin again from, or a combination thereof."
print "Else hit enter to continue anew"
#print combinations of partial pickle files available
print '\n0 - All except backups (*~)'
print '\n'.join('{} - {}'.format(i + 1, f.split('/')[-2])
for i, f in enumerate(pickle_list))
#change folder_list to pickle_list if this gives problems
#res = False#raw_input('\n')
res = raw_input('\n')
#IF LIST INDEX OUT OF RANGE START PROGRAM ALSO
#if beginning again, reset time-series intervals to the where the selected
# .part.pickle file left off!
if not res:
# ========================================
#set output file name as normal
# ========================================
time_string = str(time.strftime("%d.%m.%Y") + "-" + time.strftime("%X"))
responsefrom = []
if USE_DATALESSPAZ:
responsefrom.append('datalesspaz')
if USE_STATIONXML:
responsefrom.append('xmlresponse')
OUTBASENAME_PARTS = [
'XCORR-STACK',
'-'.join(s for s in CROSSCORR_STATIONS_SUBSET) \
if CROSSCORR_STATIONS_SUBSET else None,
'{}-{}'.format(FIRSTDAY.strftime("%d.%m.%Y"),
LASTDAY.strftime("%d.%m.%Y")),
'1bitnorm' if ONEBIT_NORM else None,
'+'.join(responsefrom)
]
OUTFILESNAME = '_'.join(p for p in OUTBASENAME_PARTS if p)
OUTFILESPATH = os.path.join(CROSSCORR_DIR, time_string, OUTFILESNAME)
OUTFOLDERS = os.path.join(CROSSCORR_DIR,
time_string,
'XCORR_PLOTS')
OUT_SNR = os.path.join(CROSSCORR_DIR, time_string, 'SNR_PLOTS')
#create unique folder in CROSS output folder named by the present time.
if not os.path.exists(OUTFOLDERS):\
os.makedirs(OUTFOLDERS)
if not os.path.exists(OUT_SNR):\
os.makedirs(OUT_SNR)
# copy configuration file to output so parameters are known for each run
OUTCONFIG = os.path.join(CROSSCORR_DIR, time_string,
os.path.basename(config_file))
print 'Copying configuration file to output directory ... '
shutil.copy(config_file, OUTCONFIG)
METADATA_PATH = '{}metadata.pickle'.format(OUTFILESPATH.\
replace(os.path.basename(OUTFILESPATH), ""))
else:
# ========================================
#reset time as previous time, reset output paths as previous path name
#reset cross-correlation dictionaries
# ========================================
print
PART_PICKLE = pickle_list[int(res)-1]
OUTFILESPATH = PART_PICKLE[:-12]
out_basename = os.path.basename(OUTFILESPATH)
print "Opening {} partial file for restart ... ".format(out_basename)
# re-initialising .part.pickle collection of cross-correlations
xc = pscrosscorr.load_pickled_xcorr(PART_PICKLE)
for key in xc.keys():
for key2 in xc[key].keys():
#help(xc[key][key2])
#print xc[key][key2].endday
a=5
#most recent day last endday of list
#read in metadata to find latest time slot. Then assign this to FIRSTDAY
METADATA_PATH = '{}metadata.pickle'.format(OUTFILESPATH.\
replace(os.path.basename(OUTFILESPATH), ""))
metadata = pscrosscorr.load_pickled_xcorr(METADATA_PATH)
#print "metadata: ", metadata[-5:]
#re-assign FIRSTDAY variable to where the data was cut off
#del metadata[-1]
FIRSTDAY = metadata[len(metadata) - 1] #+ \
#dt.timedelta(minutes=XCORR_INTERVAL)
# FIND RESTART DATE FROM PARTIAL PICKLE FILE, NOT THE METADATA PICKLE
# ============
# Main program
# ============
# Reading inventories in dataless seed and/or StationXML files
dataless_inventories = []
if USE_DATALESSPAZ:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
dataless_inventories = psstationSQL.get_dataless_inventories(DATALESS_DIR,
verbose=False)
xml_inventories = []
if USE_STATIONXML:
xml_inventories = psstationSQL.get_stationxml_inventories(STATIONXML_DIR,
verbose=False)
# Getting list of stations
#stations, subdir_len = psstation.get_stations(mseed_dir=MSEED_DIR,
# xml_inventories=xml_inventories,
# dataless_inventories=dataless_inventories,
# startday=FIRSTDAY,
# endday=LASTDAY,
# verbose=False)
#connect SQL database
SQL_db = os.path.join(DATABASE_DIR, 'timeline.db')
stations, subdir_len = psstationSQL.get_stationsSQL(SQL_db,
xml_inventories=xml_inventories,
dataless_inventories=dataless_inventories,
startday=FIRSTDAY,
endday=LASTDAY,
verbose=False)
stat_coords = np.asarray([station.coord for station in stations])
DECLUSTER = False
#if DECLUSTER:
# stat_coords = np.asarray([station.coord for station in stations])
# COORDS = Coordinates(input_list=stat_coords)
# declustered_coords = COORDS.decluster(degree_dist=0.1)
# stations = [station for station in stations if
# station.coord in declustered_coords]
# Loop on time interval
#number of time steps
N = int(((LASTDAY - FIRSTDAY).days + 1)*60*24 / XCORR_INTERVAL)
dates = [FIRSTDAY + dt.timedelta(minutes=i) for i in \
[j*XCORR_INTERVAL for j in range(N)]]
#begin = raw_input("\nPress enter to begin the program ")
# initialise preprocess class: METHOD - Bensen et al. (2007)
Preprocess = pspreprocess.Preprocess(FREQMIN, FREQMAX,
FREQMIN_EARTHQUAKE,
FREQMAX_EARTHQUAKE,
CORNERS, ZEROPHASE,
PERIOD_RESAMPLE,
WINDOW_TIME,
WINDOW_FREQ,
ONEBIT_NORM)
#loop on time-series. Date now represents XCORR_INTERVAL long time intervals
counter = 0
# loop_time0 = dt.datetime.now()
# print "\nProcessing data for date {} with a {} minute cross-correlation\
# time-interval between times: {} and {}".format(date.date(), \
# int(XCORR_INTERVAL) , date.time(), \
# (date + dt.timedelta(minutes=XCORR_INTERVAL)).time())
iterate_stations = sorted(sta for sta in stations)
# =====================================================================
# check iterate stations have a file in the SQL database
# =====================================================================
# connect the database
# conn = lite.connect(SQL_db)
# create cursor object
# c = conn.cursor()
# convert to UTC timestamp to search in SQL database
# search_start = (date - dt.timedelta(minutes=1) - epoch).total_seconds()
# search_end = (date + dt.timedelta(minutes=XCORR_INTERVAL+1) - epoch).total_seconds()
# check if files have data within the time frame search_end-search_start
# populated_stations = c.execute('SELECT station FROM file_extrema WHERE \
#starttime <= ? AND endtime >= ?', (search_start, search_end))
# populated_stations = list(it.chain(*list(populated_stations.fetchall())))
# filter stations with no data for the given time period of this loop!
# for stat in iterate_stations:
# stat_code = unicode('{}.{}.{}'.format(stat.network,
# stat.name,
# stat.channel))
# if stat_code not in populated_stations:
# iterate_stations.remove(stat)
# close timeline.db database
# conn.close()
iterate_stations = iterate_stations[1:]
stat_pairs = list(it.combinations(iterate_stations, 2))
for stat_pair in stat_pairs:
for station in stat_pair:
# =================
# processing traces
# =================
# =============================================================
# preparing functions that get one merged trace per station
# and pre-process trace, ready to be parallelized (if required)
# =============================================================
def get_merged_trace(date):
"""
Preparing func that returns one trace from selected station,
at current date. Function is ready to be parallelized.
"""
try:
trace = Preprocess.get_merged_trace(station=station,
date=date,
xcorr_interval=XCORR_INTERVAL,
skiplocs=CROSSCORR_SKIPLOCS,
minfill=MINFILL)
#plt.figure()
#plt.plot(trace.data)
#plt.show()
#plt.clf()
if total_verbose:
msg = 'merged'
print '{}.{} [{}] '.format(trace.stats.network,
trace.stats.station,
msg),
errmsg = None
except pserrors.CannotPreprocess as err:
# cannot preprocess if no trace or daily fill < *minfill*
trace = None
errmsg = '{}: skipping'.format(err)
except Exception as err:
# unhandled exception!
trace = None
errmsg = 'Unhandled error: {}'.format(err)
if errmsg:
# printing error message
if total_verbose:
print '{}.{} [{}] '.format(station.network,
station.name, errmsg),
return trace
def preprocessed_trace((trace, response)):
"""
Preparing func that returns processed trace: processing includes
removal of instrumental response, band-pass filtering, demeaning,
detrending, downsampling, time-normalization and spectral whitening
(see pscrosscorr.preprocess_trace()'s doc)
Function is ready to be parallelized.
"""
if not trace or response is False:
return
try:
Preprocess.preprocess_trace(trace=trace, paz=response, verbose=True)
msg = 'ok'
if total_verbose:
print '{}.{} [{}] '.format(trace.stats.network,
trace.stats.station,
msg),
except pserrors.CannotPreprocess as err:
# cannot preprocess if no instrument response was found,
# trace data are not consistent etc. (see function's doc)
trace = None
print(err)
print 'skipping'
except Exception as err:
# unhandled exception!
trace = None
print(err)
print 'skipping'
# printing output (error or ok) message
# although processing is performed in-place, trace is returned
# in order to get it back after multi-processing
return trace
# ====================================
# getting one merged trace per station
# ====================================
merge_t0 = dt.datetime.now()
print '\nMerging traces ... '
if MULTIPROCESSING['merge trace']:
# multiprocessing turned on: one process per station
pool = mp.Pool(None)
traces = pool.map(get_merged_trace, dates)
pool.close()
pool.join()
else:
# multiprocessing turned off: processing stations one after another
traces = [get_merged_trace(date) for date in dates]
# =====================================================
# getting or attaching instrumental response
# (parallelization is difficult because of inventories)
# =====================================================
print "traces: ", traces
responses = []
for tr in traces:
if not tr:
responses.append(None)
continue
# responses elements can be (1) dict of PAZ if response found in
# dataless inventory, (2) None if response found in StationXML
# inventory (directly attached to trace) or (3) False if no
# response found
if RESP_REMOVE:
try:
response = Preprocess.get_or_attach_response(
trace=tr,
dataless_inventories=dataless_inventories,
xml_inventories=xml_inventories)
errmsg = None
except pserrors.CannotPreprocess as err:
# response not found
response = False
errmsg = '{}: skipping'.format(err)
except Exception as err:
# unhandled exception!
response = False
errmsg = 'Unhandled error: {}'.format(err)
responses.append(response)
if errmsg:
# printing error message
if total_verbose:
print '{}.{} [{}] '.format(tr.stats.network,
tr.stats.station,
errmsg),
else:
responses.append(None)
print '\nTraces merged and responses removed in {:.1f} seconds'\
.format((dt.datetime.now() - merge_t0).total_seconds())
# =================
# processing traces
# =================
print '\nPre-processing traces ... '
t0 = dt.datetime.now()
# must have more than one trace for cross-correlations!
traces = np.array(traces)
traces_check = traces[traces != np.array(None)]
if len(traces_check) > 1:
if MULTIPROCESSING['process trace']:
# multiprocessing turned on: one process per station
pool = mp.Pool(NB_PROCESSES)
traces = pool.map(preprocessed_trace, zip(traces, responses))
pool.close()
pool.join()
else:
# multiprocessing turned off: processing stations one after another
try:
traces = [preprocessed_trace((tr, resp)) for tr,
resp in zip(traces, responses)]
except:
continue
# setting up dict of current date's traces, {station: trace}
tracedict = {s.name: trace for s, trace in zip(iterate_stations,
traces) if trace}
pairs = list(it.combinations(sorted(tracedict.items()), 2))
print pairs
| gpl-3.0 |
sserrot/champion_relationships | venv/Lib/site-packages/IPython/testing/iptest.py | 1 | 16957 | # -*- coding: utf-8 -*-
"""IPython Test Suite Runner.
This module provides a main entry point to a user script to test IPython
itself from the command line. There are two ways of running this script:
1. With the syntax `iptest all`. This runs our entire test suite by
calling this script (with different arguments) recursively. This
causes modules and package to be tested in different processes, using nose
or trial where appropriate.
2. With the regular nose syntax, like `iptest IPython -- -vvs`. In this form
the script simply calls nose, but with special command line flags and
plugins loaded. Options after `--` are passed to nose.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
from io import BytesIO
import os
import os.path as path
import sys
from threading import Thread, Lock, Event
import warnings
import nose.plugins.builtin
from nose.plugins.xunit import Xunit
from nose import SkipTest
from nose.core import TestProgram
from nose.plugins import Plugin
from nose.util import safe_str
from IPython import version_info
from IPython.utils.py3compat import decode
from IPython.utils.importstring import import_item
from IPython.testing.plugin.ipdoctest import IPythonDoctest
from IPython.external.decorators import KnownFailure, knownfailureif
pjoin = path.join
# Enable printing all warnings raise by IPython's modules
warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
warnings.filterwarnings('error', message='.*apply_wrapper.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*make_label_dec', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*decorated_dummy.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*skip_file_no_x11.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*onlyif_any_cmd_exists.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*disable_gui.*', category=DeprecationWarning, module='.*')
warnings.filterwarnings('error', message='.*ExceptionColors global is deprecated.*', category=DeprecationWarning, module='.*')
# Jedi older versions
warnings.filterwarnings(
'error', message='.*elementwise != comparison failed and.*', category=FutureWarning, module='.*')
if version_info < (6,):
# nose.tools renames all things from `camelCase` to `snake_case` which raise an
# warning with the runner they also import from standard import library. (as of Dec 2015)
# Ignore, let's revisit that in a couple of years for IPython 6.
warnings.filterwarnings(
'ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
if version_info < (8,):
warnings.filterwarnings('ignore', message='.*Completer.complete.*',
category=PendingDeprecationWarning, module='.*')
else:
warnings.warn(
'Completer.complete was pending deprecation and should be changed to Deprecated', FutureWarning)
# ------------------------------------------------------------------------------
# Monkeypatch Xunit to count known failures as skipped.
# ------------------------------------------------------------------------------
def monkeypatch_xunit():
try:
        knownfailureif(True)(lambda: None)()
except Exception as e:
KnownFailureTest = type(e)
def addError(self, test, err, capt=None):
if issubclass(err[0], KnownFailureTest):
err = (SkipTest,) + err[1:]
return self.orig_addError(test, err, capt)
Xunit.orig_addError = Xunit.addError
Xunit.addError = addError
#-----------------------------------------------------------------------------
# Check which dependencies are installed and greater than minimum version.
#-----------------------------------------------------------------------------
def extract_version(mod):
return mod.__version__
def test_for(item, min_version=None, callback=extract_version):
"""Test to see if item is importable, and optionally check against a minimum
version.
If min_version is given, the default behavior is to check against the
`__version__` attribute of the item, but specifying `callback` allows you to
extract the value you are interested in. e.g::
In [1]: import sys
In [2]: from IPython.testing.iptest import test_for
In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
Out[3]: True
"""
try:
check = import_item(item)
except (ImportError, RuntimeError):
# GTK reports Runtime error if it can't be initialized even if it's
# importable.
return False
else:
if min_version:
if callback:
# extra processing step to get version to compare
check = callback(check)
return check >= min_version
else:
return True
# Global dict where we can store information on what we have and what we don't
# have available at test run time
have = {'matplotlib': test_for('matplotlib'),
'pygments': test_for('pygments'),
'sqlite3': test_for('sqlite3')}
#-----------------------------------------------------------------------------
# Test suite definitions
#-----------------------------------------------------------------------------
test_group_names = ['core',
'extensions', 'lib', 'terminal', 'testing', 'utils',
]
class TestSection(object):
def __init__(self, name, includes):
self.name = name
self.includes = includes
self.excludes = []
self.dependencies = []
self.enabled = True
def exclude(self, module):
if not module.startswith('IPython'):
module = self.includes[0] + "." + module
self.excludes.append(module.replace('.', os.sep))
def requires(self, *packages):
self.dependencies.extend(packages)
@property
def will_run(self):
return self.enabled and all(have[p] for p in self.dependencies)
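# Usage sketch (hypothetical section) of the class above: build a section,
# exclude a submodule from it and declare a dependency --
#     sec = TestSection('lib', includes=['IPython.lib'])
#     sec.exclude('kernel')       # skips IPython.lib.kernel
#     sec.requires('pygments')    # section only runs if the dependency is available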
# Name -> (include, exclude, dependencies_met)
test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
# Exclusions and dependencies
# ---------------------------
# core:
sec = test_sections['core']
if not have['sqlite3']:
sec.exclude('tests.test_history')
sec.exclude('history')
if not have['matplotlib']:
sec.exclude('pylabtools'),
sec.exclude('tests.test_pylabtools')
# lib:
sec = test_sections['lib']
sec.exclude('kernel')
if not have['pygments']:
sec.exclude('tests.test_lexers')
# We do this unconditionally, so that the test suite doesn't import
# gtk, changing the default encoding and masking some unicode bugs.
sec.exclude('inputhookgtk')
# We also do this unconditionally, because wx can interfere with Unix signals.
# There are currently no tests for it anyway.
sec.exclude('inputhookwx')
# Testing inputhook will need a lot of thought, to figure out
# how to have tests that don't lock up with the gui event
# loops in the picture
sec.exclude('inputhook')
# testing:
sec = test_sections['testing']
# These have to be skipped on win32 because they use echo, rm, cd, etc.
# See ticket https://github.com/ipython/ipython/issues/87
if sys.platform == 'win32':
sec.exclude('plugin.test_exampleip')
sec.exclude('plugin.dtexample')
# don't run jupyter_console tests found via shim
test_sections['terminal'].exclude('console')
# extensions:
sec = test_sections['extensions']
# This is deprecated in favour of rpy2
sec.exclude('rmagic')
# autoreload does some strange stuff, so move it to its own test section
sec.exclude('autoreload')
sec.exclude('tests.test_autoreload')
test_sections['autoreload'] = TestSection('autoreload',
['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
test_group_names.append('autoreload')
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def check_exclusions_exist():
from IPython.paths import get_ipython_package_dir
from warnings import warn
parent = os.path.dirname(get_ipython_package_dir())
    for sec in test_sections.values():
        for pattern in sec.excludes:
fullpath = pjoin(parent, pattern)
if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
warn("Excluding nonexistent file: %r" % pattern)
class ExclusionPlugin(Plugin):
"""A nose plugin to effect our exclusions of files and directories.
"""
name = 'exclusions'
score = 3000 # Should come before any other plugins
def __init__(self, exclude_patterns=None):
"""
Parameters
----------
exclude_patterns : sequence of strings, optional
Filenames containing these patterns (as raw strings, not as regular
expressions) are excluded from the tests.
"""
self.exclude_patterns = exclude_patterns or []
super(ExclusionPlugin, self).__init__()
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
self.enabled = True
def wantFile(self, filename):
"""Return whether the given filename should be scanned for tests.
"""
if any(pat in filename for pat in self.exclude_patterns):
return False
return None
def wantDirectory(self, directory):
"""Return whether the given directory should be scanned for tests.
"""
if any(pat in directory for pat in self.exclude_patterns):
return False
return None
class StreamCapturer(Thread):
daemon = True # Don't hang if main thread crashes
started = False
def __init__(self, echo=False):
super(StreamCapturer, self).__init__()
self.echo = echo
self.streams = []
self.buffer = BytesIO()
self.readfd, self.writefd = os.pipe()
self.buffer_lock = Lock()
self.stop = Event()
def run(self):
self.started = True
while not self.stop.is_set():
chunk = os.read(self.readfd, 1024)
with self.buffer_lock:
self.buffer.write(chunk)
if self.echo:
sys.stdout.write(decode(chunk))
os.close(self.readfd)
os.close(self.writefd)
def reset_buffer(self):
with self.buffer_lock:
self.buffer.truncate(0)
self.buffer.seek(0)
def get_buffer(self):
with self.buffer_lock:
return self.buffer.getvalue()
def ensure_started(self):
if not self.started:
self.start()
def halt(self):
"""Safely stop the thread."""
if not self.started:
return
self.stop.set()
os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
self.join()
class SubprocessStreamCapturePlugin(Plugin):
name='subprocstreams'
def __init__(self):
Plugin.__init__(self)
self.stream_capturer = StreamCapturer()
self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
# This is ugly, but distant parts of the test machinery need to be able
# to redirect streams, so we make the object globally accessible.
nose.iptest_stdstreams_fileno = self.get_write_fileno
def get_write_fileno(self):
if self.destination == 'capture':
self.stream_capturer.ensure_started()
return self.stream_capturer.writefd
elif self.destination == 'discard':
return os.open(os.devnull, os.O_WRONLY)
else:
return sys.__stdout__.fileno()
def configure(self, options, config):
Plugin.configure(self, options, config)
# Override nose trying to disable plugin.
if self.destination == 'capture':
self.enabled = True
def startTest(self, test):
# Reset log capture
self.stream_capturer.reset_buffer()
def formatFailure(self, test, err):
# Show output
ec, ev, tb = err
captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
if captured.strip():
ev = safe_str(ev)
out = [ev, '>> begin captured subprocess output <<',
captured,
'>> end captured subprocess output <<']
return ec, '\n'.join(out), tb
return err
formatError = formatFailure
def finalize(self, result):
self.stream_capturer.halt()
def run_iptest():
"""Run the IPython test suite using nose.
This function is called when this script is **not** called with the form
`iptest all`. It simply calls nose with appropriate command line flags
and accepts all of the standard nose arguments.
"""
# Apply our monkeypatch to Xunit
if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
monkeypatch_xunit()
arg1 = sys.argv[1]
if arg1.startswith('IPython/'):
if arg1.endswith('.py'):
arg1 = arg1[:-3]
sys.argv[1] = arg1.replace('/', '.')
arg1 = sys.argv[1]
if arg1 in test_sections:
section = test_sections[arg1]
sys.argv[1:2] = section.includes
elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
section = test_sections[arg1[8:]]
sys.argv[1:2] = section.includes
else:
section = TestSection(arg1, includes=[arg1])
argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
# We add --exe because of setuptools' imbecility (it
# blindly does chmod +x on ALL files). Nose does the
# right thing and it tries to avoid executables,
# setuptools unfortunately forces our hand here. This
# has been discussed on the distutils list and the
# setuptools devs refuse to fix this problem!
'--exe',
]
if '-a' not in argv and '-A' not in argv:
argv = argv + ['-a', '!crash']
if nose.__version__ >= '0.11':
# I don't fully understand why we need this one, but depending on what
# directory the test suite is run from, if we don't give it, 0 tests
# get run. Specifically, if the test suite is run from the source dir
# with an argument (like 'iptest.py IPython.core', 0 tests are run,
# even if the same call done in this directory works fine). It appears
# that if the requested package is in the current dir, nose bails early
# by default. Since it's otherwise harmless, leave it in by default
# for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
argv.append('--traverse-namespace')
plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
SubprocessStreamCapturePlugin() ]
# we still have some vestigial doctests in core
if (section.name.startswith(('core', 'IPython.core', 'IPython.utils'))):
plugins.append(IPythonDoctest())
argv.extend([
'--with-ipdoctest',
'--ipdoctest-tests',
'--ipdoctest-extension=txt',
])
# Use working directory set by parent process (see iptestcontroller)
if 'IPTEST_WORKING_DIR' in os.environ:
os.chdir(os.environ['IPTEST_WORKING_DIR'])
# We need a global ipython running in this process, but the special
# in-process group spawns its own IPython kernels, so for *that* group we
# must avoid also opening the global one (otherwise there's a conflict of
# singletons). Ultimately the solution to this problem is to refactor our
# assumptions about what needs to be a singleton and what doesn't (app
# objects should, individual shells shouldn't). But for now, this
# workaround allows the test suite for the inprocess module to complete.
if 'kernel.inprocess' not in section.name:
from IPython.testing import globalipapp
globalipapp.start_ipython()
# Now nose can run
TestProgram(argv=argv, addplugins=plugins)
if __name__ == '__main__':
run_iptest()
| mit |
jjcc/trading-with-python | lib/bats.py | 78 | 3458 | #-------------------------------------------------------------------------------
# Name: BATS
# Purpose: get data from BATS exchange
#
# Author: jev
#
# Created: 17/08/2013
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import urllib
import re
import pandas as pd
import datetime as dt
import zipfile
import StringIO
from extra import ProgressBar
import os
import yahooFinance as yf
from string import Template
import numpy as np
def fileName2date( fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
m = re.findall('\d+',name)[0]
return dt.datetime.strptime(m,'%Y%m%d').date()
def date2fileName(date):
return 'BATSshvol%s.txt.zip' % date.strftime('%Y%m%d')
def downloadUrl(date):
s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
url = s.substitute(fName=date2fileName(date), year=date.year, month='%02d' % date.month)
return url
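# Example (derived from the helpers above): downloadUrl(dt.date(2013, 8, 1)) ->
# 'http://www.batstrading.com/market_data/shortsales/2013/08/BATSshvol20130801.txt.zip-dl?mkt=bzx'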
class BATS_Data(object):
def __init__(self, dataDir):
''' create class. dataDir: directory to which files are downloaded '''
self.dataDir = dataDir
self.shortRatio = None
self._checkDates()
def _checkDates(self):
''' update list of available dataset dates'''
self.dates = []
for fName in os.listdir(self.dataDir):
self.dates.append(fileName2date(fName))
def _missingDates(self):
''' check for missing dates based on spy data'''
print 'Getting yahoo data to determine business dates... ',
spy = yf.getHistoricData('SPY',sDate = (2010,1,1))
busDates = [d.date() for d in spy.index ]
print 'Date range: ', busDates[0] ,'-', busDates[-1]
missing = []
for d in busDates:
if d not in self.dates:
missing.append(d)
return missing
def updateDb(self):
print 'Updating database'
missing = self._missingDates()
for i, date in enumerate(missing):
source = downloadUrl(date)
dest = os.path.join(self.dataDir,date2fileName(date))
if not os.path.exists(dest):
print 'Downloading [%i/%i]' %(i,len(missing)), source
urllib.urlretrieve(source, dest)
else:
print 'x',
print 'Update done.'
self._checkDates()
def loadDate(self,date):
fName = os.path.join(self.dataDir, date2fileName(date))
zipped = zipfile.ZipFile(fName) # open zip file
lines = zipped.read(zipped.namelist()[0]) # read first file from to lines
buf = StringIO.StringIO(lines) # create buffer
df = pd.read_csv(buf,sep='|',index_col=1,parse_dates=False,dtype={'Date':object,'Short Volume':np.float32,'Total Volume':np.float32})
s = df['Short Volume']/df['Total Volume']
s.name = dt.datetime.strptime(df['Date'][-1],'%Y%m%d')
return s
def loadData(self):
''' load data from zip files '''
data = []
pb = ProgressBar(len(self.dates)-1)
for idx, date in enumerate(self.dates):
data.append(self.loadDate(date))
pb.animate(idx)
self.shortRatio = pd.DataFrame(data)
return self.shortRatio
| bsd-3-clause |
virneo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py | 69 | 2207 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
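# Typical use selects this backend before pyplot is imported rather than calling
# new_figure_manager directly (sketch only):
#     import matplotlib
#     matplotlib.use('GTKCairo')
#     import matplotlib.pyplot as plt
#     plt.plot(range(5)); plt.show()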
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.ctx = pixmap.cairo_create()
self.ctx.save() # restore, save - when call new_gc()
else:
def set_pixmap (self, pixmap):
self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
self.ctx.save() # restore, save - when call new_gc()
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2GTKCairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
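# Usage sketch (illustrative, not part of the backend module): a script would
# normally select this backend before importing pyplot, assuming PyGTK and
# pycairo are installed, e.g.
#   import matplotlib
#   matplotlib.use('GTKCairo')
#   import matplotlib.pyplot as plt
#   plt.plot([1, 2, 3])
#   plt.show()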
| agpl-3.0 |
Alex-Ian-Hamilton/solarbextrapolation | solarbextrapolation/example_data_generator.py | 3 | 7504 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 15:38:51 2015
Function for creating dummy boundary map data for use with extrapolator
routines.
@author: alex_
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import random
import sunpy.map as mp
import re
from astropy import units as u
from datetime import datetime
# Function to generate the grid with Gaussian points.
# Arguments are:
# - shape, xrange, yrange: the grid dimensions and the physical x/y ranges.
# - *argv: manual parameters for all the spots. Optional: defaults to 2 random spots.
def generate_example_data(shape, xrange, yrange, *argv):
"""
A function to generate a 2D numpy.array of example data for testing
extrapolation code.
    The result is a mid-value region with a number of Gaussian spots with
    positive/negative values.
    The Gaussians can be specifically defined, or randomly generated.
Parameters
----------
shape : list
A list of the axis grid sizes, (nx,ny).
xrange : astropy.units.Quantity
The xrange for the returned dataset.
yrange : astropy.units.Quantity
The yrange for the returned dataset.
*argv : int or list, optional
        Either the integer number of poles to randomly generate (defaults
        to 2), or lists of parameters that each define a pole.
Each list contains:
position : astropy.units.Quantity
both x and y coordinates as physical or percentage units
sigma : astropy.units.Quantity
spot size as physical or percentage units
max : astropy.units.Quantity
the maximum spot intensity
"""
# If the list is empty then create random data.
arr_args = []
if not argv: # If no particle parameters or numbers were given.
arr_args = [2] # [ random.randrange(1, 6) ]
else:
arr_args = list(argv)
arr_poles = []
# If we are only given the number, then generate randomly.
if isinstance( arr_args[0], ( int, long ) ):
for pole in range(0, arr_args[0]):
# random parameters in percentage
sigma = random.uniform(2, 15) * u.percent
x_pos = random.uniform(2.0 * sigma.value, 100.0 - 2.0 * sigma.value)
y_pos = random.uniform(2.0 * sigma.value, 100.0 - 2.0 * sigma.value)
An_max = random.uniform(0.1, 0.2) * ((float(pole % 2) * 2.0) - 1) * u.T # Alternate pos/neg
arrPole = [ u.Quantity([x_pos, y_pos] * u.percent), sigma, An_max ]
arr_poles.append(arrPole)
else:
# We are given the hard-coded parameters, so use them.
arr_poles = arr_args
# Build the empty data array
arr_data = np.zeros((shape[1], shape[0]))
# Grid pixel shape
qua_pixel = u.Quantity([ ( xrange[1] - xrange[0] ) / shape[0], ( yrange[1] - yrange[0] ) / shape[1] ])
# Convert percentage positions/sigmas to physical units (units from ranges)
for pole in range(0, len(arr_poles)):
if arr_poles[pole][0].unit is u.percent:
position = u.Quantity([ (arr_poles[pole][0][0].value / 100.0) * (xrange[1] - xrange[0]) + xrange[0],
(arr_poles[pole][0][1].value / 100.0) * (yrange[1] - yrange[0]) + yrange[0] ])
arr_poles[pole] = [ position, arr_poles[pole][1], arr_poles[pole][2] ]
if arr_poles[pole][1].unit is u.percent:
sigma = (arr_poles[pole][1].value / 100.0) * (xrange[1] - xrange[0])
arr_poles[pole] = [ arr_poles[pole][0], sigma, arr_poles[pole][2] ]
# Iterate through the 2D array/matrix.
for i in range(0,shape[0]): # Row/Y
for j in range(0,shape[1]): # Column/X
# The current position
floXPrime = i * qua_pixel[0]
floYPrime = j * qua_pixel[1]
# A variable to store the sum of the magnetic fields for this point.
flo_value = 0.0
# Add all the contributions.
for tupPole in arr_poles:
# A0 (positive) and A1 (negative) parameters
An_max = tupPole[2].value
An_x = tupPole[0][0]
An_y = tupPole[0][1]
An_Dx = floXPrime - An_x + xrange[0]
An_Dy = floYPrime - An_y + yrange[0]
An_DxSqu = np.power(An_Dx.value, 2.0)
An_DySqu = np.power(An_Dy.value, 2.0)
An_Sigma = tupPole[1].value
                # So this contribution is calculated and added.
flo_An_cont = An_max * math.exp( - ( (An_DxSqu + An_DySqu) / (2 * np.power(An_Sigma, 2.0)) ))
flo_value += flo_An_cont
# Now add this to the data array.
arr_data[j][i] = flo_value
# Now return the 2D numpy array.
return arr_data
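# Example call (illustrative sketch): two randomly generated poles on a 16x16
# grid over the assumed arcsecond ranges below.
#   arr = generate_example_data([16, 16],
#                               u.Quantity([-10.0, 10.0] * u.arcsec),
#                               u.Quantity([-10.0, 10.0] * u.arcsec))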
# A function that creates a dummy header and saves the input as a fits file.
def dummyDataToMap(data, xrange, yrange, **kwargs):
"""
Basic function for taking generated data and returning a valid sunpy.map.
"""
# The kwargs
dic_user_def_meta = kwargs.get('meta', {})
# Create a header dictionary.
dicHeader = {
't_obs': datetime.now().isoformat(),
'bunit': 'Tesla', #'Gauss',
'bitpix': 64, #re.search('\\d+', 'float64')[0],#64, # Automatic
'naxis': 2, # Automatic
'naxis1': data.shape[1], # Automatic
'naxis2': data.shape[0], # Automatic
'cdelt1': (xrange[1].value - xrange[0].value) / data.shape[1], # 0.504295,
'cdelt2': (yrange[1].value - yrange[0].value) / data.shape[0],
'cunit1': str(xrange.unit), #'arcsec',
'cunit2': str(yrange.unit), #'arcsec',
'crpix1': data.shape[1] / 2.0 + 0.5, # central x-pixel.
        'crpix2': data.shape[0] / 2.0 + 0.5, # central y-pixel.
'rsun_ref': 696000000,
'dsun_ref': 149597870691,
'datamax': data.max(),
'datamin': data.min(),
'datavals': data.shape[0] * data.shape[1],
'CRVAL1': (xrange[0].value + xrange[1].value)/2.0, #0.000000,
'CRVAL2': (yrange[0].value + yrange[1].value)/2.0
}
# Add the user defined meta entries
for key, value in dic_user_def_meta.iteritems():
dicHeader[key] = value
#print str(key) + ': ' + str(value)
# Create and return a sunpy map from the data
return mp.Map((data, dicHeader))
if __name__ == '__main__':
# Generate an example map
# The input parameters:
arr_grid_shape = [ 20, 22 ] # [ y-size, x-size ]
qua_xrange = u.Quantity([ -10.0, 10.0 ] * u.arcsec)
qua_yrange = u.Quantity([ -11.0, 11.0 ] * u.arcsec)
# Manual Pole Details
#arrA0 = [ u.Quantity([ 1.0, 1.0 ] * u.arcsec), 2.0 * u.arcsec, 0.2 * u.T ]
arrA0 = [ u.Quantity([ 25, 25 ] * u.percent), 10.0 * u.percent, 0.2 * u.T ]
arrA1 = [ u.Quantity([ 75, 75 ] * u.percent), 10.0 * u.percent, -0.2 * u.T ]
# Generate the data and save to a map
arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange, arrA0, arrA1)#, arrA0, arrA1)
#arr_Data = generate_example_data(arr_grid_shape, qua_xrange, qua_yrange)#, arrA0, arrA1)
aMap = dummyDataToMap(arr_Data, qua_xrange, qua_yrange)
aMap.save('C://fits//temp6.fits')
| mit |
alanmarazzi/mepcheck | tests/test_package.py | 1 | 1374 | import pytest
from mepcheck import EUvotes, get_meps
from mepcheck.savemeps import save_meps
import requests
from bs4 import BeautifulSoup
import pickle
import os
from datetime import date, timedelta
import json
import pandas as pd
def test_save_meps():
save_meps()
assert os.path.isfile(os.path.expanduser("~/.meps"))
def test_EUvotes():
eu = EUvotes(1, limit=10)
assert eu.limit == 10
def test_mep_name():
assert EUvotes(1, limit=1).name == EUvotes(1, limit=1)._mep_name(1)
def test_get_votes():
assert len(EUvotes(1, limit=10).absent) == 10
def test_to_date():
assert isinstance(EUvotes(1, limit=1)._to_date("2017-01-01"), date)
def test_last_vote_period():
votes = EUvotes(1, limit=1)
dates = [date.today(),
date.today() - timedelta(weeks=1),
date.today() - timedelta(weeks=4),
date.today() - timedelta(weeks=52)]
assert votes._last_vote_period(dates) == [
"This week",
"This month",
"More than one month",
"More than one month"
]
def test_change_limit():
lim = EUvotes(1, 1)
lim_before = lim.limit
lim.change_limit(5)
assert lim_before != lim.limit
def test_data_():
data = EUvotes(1, 2)
    assert (isinstance(data.data_("json"), str)
            and isinstance(data.data_("list"), list)
            and isinstance(data.data_("df"), pd.DataFrame))
| mit |
waterponey/scikit-learn | doc/sphinxext/sphinx_gallery/notebook.py | 14 | 4867 | # -*- coding: utf-8 -*-
r"""
============================
Parser for Jupyter notebooks
============================
Class that holds the Jupyter notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
from functools import partial
import json
import os
import re
import sys
def ipy_notebook_skeleton():
"""Returns a dictionary with the elements of a Jupyter notebook"""
py_version = sys.version_info
notebook_skeleton = {
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python " + str(py_version[0]),
"language": "python",
"name": "python" + str(py_version[0])
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": py_version[0]
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython" + str(py_version[0]),
"version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
}
},
"nbformat": 4,
"nbformat_minor": 0
}
return notebook_skeleton
def directive_fun(match, directive):
"""Helper to fill in directives"""
directive_to_alert = dict(note="info", warning="danger")
return ('<div class="alert alert-{0}"><h4>{1}</h4><p>{2}</p></div>'
.format(directive_to_alert[directive], directive.capitalize(),
match.group(1).strip()))
def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the Jupyter notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'\begin{{align}}{0}\end{{align}}'.format(
match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+?)`', re.DOTALL)
text = re.sub(inline_math, r'$\1$', text)
directives = ('warning', 'note')
for directive in directives:
directive_re = re.compile(r'^\.\. %s::((?:.+)?(?:\n+^ .+)*)'
% directive, flags=re.M)
text = re.sub(directive_re,
partial(directive_fun, directive=directive), text)
links = re.compile(r'^ *\.\. _.*:.*$\n', flags=re.M)
text = re.sub(links, '', text)
refs = re.compile(r':ref:`')
text = re.sub(refs, '`', text)
contents = re.compile(r'^\s*\.\. contents::.*$(\n +:\S+: *$)*\n',
flags=re.M)
text = re.sub(contents, '', text)
images = re.compile(
r'^\.\. image::(.*$)(?:\n *:alt:(.*$)\n)?(?: +:\S+:.*$\n)*',
flags=re.M)
text = re.sub(
images, lambda match: '![{1}]({0})\n'.format(
match.group(1).strip(), (match.group(2) or '').strip()), text)
return text
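# Illustrative behaviour of rst2md (sketch; exact whitespace may differ):
#   rst2md(':math:`x^2`')  ->  '$x^2$'
#   rst2md('.. note::\n    Be careful')
#       ->  '<div class="alert alert-info"><h4>Note</h4><p>Be careful</p></div>'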
class Notebook(object):
"""Jupyter notebook object
Constructs the file cell-by-cell and writes it at the end"""
def __init__(self, file_name, target_dir):
"""Declare the skeleton of the notebook
Parameters
----------
file_name : str
original script file name, .py extension will be renamed
target_dir: str
directory where notebook file is to be saved
"""
self.file_name = file_name.replace('.py', '.ipynb')
self.write_file = os.path.join(target_dir, self.file_name)
self.work_notebook = ipy_notebook_skeleton()
self.add_code_cell("%matplotlib inline")
def add_code_cell(self, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
self.work_notebook["cells"].append(code_cell)
def add_markdown_cell(self, text):
"""Add a markdown cell to the notebook
Parameters
----------
        text : str
Cell content
"""
markdown_cell = {
"cell_type": "markdown",
"metadata": {},
"source": [rst2md(text)]
}
self.work_notebook["cells"].append(markdown_cell)
def save_file(self):
"""Saves the notebook to a file"""
with open(self.write_file, 'w') as out_nb:
json.dump(self.work_notebook, out_nb, indent=2)
| bsd-3-clause |
hainm/statsmodels | statsmodels/datasets/strikes/data.py | 25 | 1951 | #! /usr/bin/env python
"""U.S. Strike Duration Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
This is a subset of the data used in Kennan (1985). It was originally
published by the Bureau of Labor Statistics.
::
Kennan, J. 1985. "The duration of contract strikes in US manufacturing.
`Journal of Econometrics` 28.1, 5-28.
"""
DESCRSHORT = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production."""
DESCRLONG = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production. The data is a subset of the data originally
used by Kennan. The data here is data for the months of June only to avoid
seasonal issues."""
#suggested notes
NOTE = """::
Number of observations - 62
Number of variables - 2
Variable name definitions::
duration - duration of the strike in days
iprod - unanticipated industrial production
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/strikes.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
mkness/TheCannon | code/makeplots_talks/makeplot_fits_self_cluster_1.py | 1 | 5646 | #!/usr/bin/python
import scipy
import numpy
import pickle
from numpy import *
from scipy import ndimage
from scipy import interpolate
from numpy import loadtxt
import os
import numpy as np
from numpy import *
import matplotlib
from pylab import rcParams
from pylab import *
from matplotlib import pyplot
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.pyplot import axes
from matplotlib.pyplot import colorbar
#from matplotlib.ticker import NullFormatter
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
from matplotlib import rc
rc('text', usetex=False)
rc('font', family='serif')
def plotfits():
file_in = "self_tags.pickle"
file_in2 = open(file_in, 'r')
params, icovs_params = pickle.load(file_in2)
params = array(params)
file_in2.close()
filein2 = 'starsin_test2.txt' # this is for self test this is dangerous - need to implement the same logg cut here, this is original data values or otherwise metadata
filein2 = 'starsin_new_all_ordered.txt' # this is for self test this is dangerous - need to implement the same logg cut here, this is original data values or otherwise metadata
filein2 = 'test4_selfg.txt' # this is for self test this is dangerous - need to implement the same logg cut here, this is original data values or otherwise metadata
a = open(filein2)
al = a.readlines()
names = []
for each in al:
names.append(each.split()[1])
unames = unique(names)
starind = arange(0,len(names), 1)
name_ind = []
names = array(names)
for each in unames:
takeit = each == names
name_ind.append(np.int(starind[takeit][-1]+1. ) )
cluster_ind = [0] + list(sort(name_ind))# + [len(al)]
plot_markers = ['ko', 'yo', 'ro', 'bo', 'co','k*', 'y*', 'r*', 'b*', 'c*', 'ks', 'rs', 'bs', 'cs', 'rd', 'kd', 'bd', 'cd', 'mo', 'ms' ]
#plot_markers = ['k', 'y', 'r', 'b', 'c','k', 'y', 'r', 'b', 'c', 'k', 'r', 'b', 'c', 'r', 'k', 'b', 'c', 'm', 'm' ]
#cv_ind = np.arange(395,469,1)
#a = open(filein2)
#al = a.readlines()
#bl = []
#for each in al:
# bl.append(each.strip())
#bl = np.delete(bl, [cv_ind], axis = 0)
#savetxt("starsin_cut.txt", bl, fmt = "%s")
#filein3 = 'starsin_cut.txt'
t,g,feh,t_err,feh_err = loadtxt(filein2, usecols = (4,6,8,16,17), unpack =1)
g_err = [0]*len(g)
g_err = array(g_err)
params = array(params)
covs_params = np.linalg.inv(icovs_params)
rcParams['figure.figsize'] = 12.0, 10.0
fig, temp = pyplot.subplots(3,1, sharex=False, sharey=False)
ax1 = temp[0]
ax2 = temp[1]
ax3 = temp[2]
params_labels = [params[:,0], params[:,1], params[:,2] , covs_params[:,0,0]**0.5, covs_params[:,1,1]**0.5, covs_params[:,2,2]**0.5 ]
cval = ['k', 'b', 'r']
input_ASPCAP = [t, g, feh, t_err, g_err, feh_err]
listit_1 = [0,1,2]
listit_2 = [1,0,0]
axs = [ax1,ax2,ax3]
labels = ["ASPCAP log g", "ASPCAP Teff", "ASPCAP Teff"]
for i in range(0,len(cluster_ind)-1):
indc1 = cluster_ind[i]
indc2 = cluster_ind[i+1]
for ax, num,num2,label1,x1,y1 in zip(axs, listit_1,listit_2,labels, [4800,3.0,0.3], [3400,1,-1.5]):
pick = logical_and(g[indc1:indc2] > 0, logical_and(t_err[indc1:indc2] < 300, feh[indc1:indc2] > -4.0) )
cind = array(input_ASPCAP[1][indc1:indc2][pick])
cind = array(input_ASPCAP[num2][indc1:indc2][pick]).flatten()
ax.plot(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick], plot_markers[i])
#ax.errorbar(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick],yerr= params_labels[num+3][indc1:indc2][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
#ax.errorbar(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick],xerr=input_ASPCAP[num+3][indc1:indc2][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
ax.text(x1,y1,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[num+3][pick]),2)),fontsize = 14)
ax1.plot([0,6000], [0,6000], linewidth = 1.5, color = 'k' )
ax2.plot([0,5], [0,5], linewidth = 1.5, color = 'k' )
ax3.plot([-3,2], [-3,2], linewidth = 1.5, color = 'k' )
ax1.set_xlim(3500, 5500)
ax2.set_xlim(0, 5)
ax3.set_xlim(-3, 2)
ax1.set_xlabel("ASPCAP Teff, [K]", fontsize = 14,labelpad = 5)
ax1.set_ylabel("NHR+ Teff, [K]", fontsize = 14,labelpad = 5)
ax2.set_xlabel("ASPCAP logg, [dex]", fontsize = 14,labelpad = 5)
ax2.set_ylabel("NHR+ logg, [dex]", fontsize = 14,labelpad = 5)
ax3.set_xlabel("ASPCAP [Fe/H], [dex]", fontsize = 14,labelpad = 5)
ax3.set_ylabel("NHR+ [Fe/H], [dex]", fontsize = 14,labelpad = 5)
ax1.set_ylim(1000,6000)
ax1.set_ylim(3000,5500)
ax2.set_ylim(-3,6)
ax3.set_ylim(-3,2)
# attach lines to plots
fig.subplots_adjust(hspace=0.22)
#prefix = "/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/fits_3_self_cut"
## prefix = "/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/test_self"
#savefig(fig, prefix, transparent=False, bbox_inches='tight', pad_inches=0.5)
return
def savefig(fig, prefix, **kwargs):
for suffix in (".eps", ".png"):
print "writing %s" % (prefix + suffix)
fig.savefig(prefix + suffix, **kwargs)
if __name__ == "__main__": #args in command line
wl1,wl2,wl3,wl4,wl5,wl6 = 15392, 15697, 15958.8, 16208.6, 16120.4, 16169.5
plotfits()
| mit |
phoebe-project/phoebe2-docs | development/tutorials/grav_redshift.py | 2 | 3008 | #!/usr/bin/env python
# coding: utf-8
# Gravitational Redshift (rv_grav)
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# In[3]:
b.add_dataset('rv', times=np.linspace(0,1,101), dataset='rv01')
# In[4]:
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'logarithmic')
b.set_value_all('ld_coeffs', [0.0, 0.0])
b.set_value_all('atm', 'blackbody')
# Relevant Parameters
# --------------------
#
# Gravitational redshifts are only accounted for in flux-weighted RVs (dynamical RVs literally only return the z-component of the velocity of the center-of-mass of each star).
#
# First let's run a model with the default radii for our stars.
# In[5]:
print(b['value@requiv@primary@component'], b['value@requiv@secondary@component'])
# Note that gravitational redshift effects for RVs (rv_grav) are disabled by default. We could call add_compute and then set them to be true, or just temporarily override them by passing rv_grav to the run_compute call.
# In[6]:
b.run_compute(rv_method='flux-weighted', rv_grav=True, irrad_method='none', model='defaultradii_true')
# Now let's run another model but with much smaller stars (but with the same masses).
# In[7]:
b['requiv@primary'] = 0.4
b['requiv@secondary'] = 0.4
# In[8]:
b.run_compute(rv_method='flux-weighted', rv_grav=True, irrad_method='none', model='smallradii_true')
# Now let's run another model, but with gravitational redshift effects disabled
# In[9]:
b.run_compute(rv_method='flux-weighted', rv_grav=False, irrad_method='none', model='smallradii_false')
# Influence on Radial Velocities
# ------------------
# In[10]:
afig, mplfig = b.filter(model=['defaultradii_true', 'smallradii_true']).plot(legend=True, show=True)
# In[11]:
afig, mplfig = b.filter(model=['smallradii_true', 'smallradii_false']).plot(legend=True, show=True)
# Besides the obvious change in the Rossiter-McLaughlin effect (not due to gravitational redshift), we can see that making the radii smaller shifts the entire RV curve up (the spectra are redshifted as they have to climb out of a steeper potential at the surface of the stars).
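# The size of the effect can be estimated in the weak-field limit as v_grav ~ G*M/(R*c), so shrinking the radius from ~1.0 to 0.4 solar radii increases the offset by a factor of ~2.5. A rough sanity check (illustrative sketch; the ~1 solar-mass value below is an assumption rather than a value read from the bundle):
# In[ ]:
from astropy import constants as const
M = 1.0 * u.solMass  # assumed component mass, close to the default binary value
for R in [1.0, 0.4]:
    v_grav = (const.G * M / (R * u.solRad * const.c)).to(u.km / u.s)
    print('requiv = {} Rsun: v_grav ~ {:.2f}'.format(R, v_grav))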
# In[12]:
print(b['rvs@rv01@primary@defaultradii_true'].get_value().min())
print(b['rvs@rv01@primary@smallradii_true'].get_value().min())
print(b['rvs@rv01@primary@smallradii_false'].get_value().min())
# In[13]:
print(b['rvs@rv01@primary@defaultradii_true'].get_value().max())
print(b['rvs@rv01@primary@smallradii_true'].get_value().max())
print(b['rvs@rv01@primary@smallradii_false'].get_value().max())
# In[ ]:
| gpl-3.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| gpl-3.0 |
bearishtrader/trading-with-python | lib/yahooFinance.py | 76 | 8290 | # -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <[email protected]>
# License: BSD
"""
Toolset working with yahoo finance data
This module includes functions for easy access to YahooFinance data
Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = WidePanel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in store.keys() ]
data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
self.wp = WidePanel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
print 'Saving data to', dataFile
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = WidePanel({symbol:df})
else:
self.wp[symbol] = df
except Exception,e:
print e
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
def getQuote(symbols):
''' get current yahoo quote, return a DataFrame '''
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(zip(header,[[] for i in range(len(header))]))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
for line in lines:
fields = line.strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
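# Usage sketch (illustrative; relies on the CSV quote API that Yahoo provided
# at the time this module was written):
#   quotes = getQuote(['SPY', 'AAPL'])
#   print quotes[['last', 'PE', 'market_cap']]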
def _historicDataUrl(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
Will get OHLCV data frame if sinle symbol is provided.
If many symbols are provided, it will return a wide panel
Parameters
------------
    symbols: Yahoo Finance symbol or a list of symbols
sDate: start date (y,m,d)
eDate: end date (y,m,d)
adjust : T/[F] adjust data based on adj_close
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print 'Downloading data:'
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return WidePanel(data)
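# Usage sketch (illustrative):
#   df = getHistoricData('SPY', sDate=(2012, 1, 1))        # single OHLCV DataFrame
#   wp = getHistoricData(['SPY', 'QQQ'], adjust=True)      # WidePanel, one frame per symbol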
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
return None
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
if verbose:
print 'Got %i days of data' % len(df)
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
    Adjust historical data based on the adj_close field
'''
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/tests/test_lda.py | 71 | 5883 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/core/algorithms.py | 9 | 18486 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn
import numpy as np
from pandas import compat, lib, _np_version_under1p8
import pandas.core.common as com
import pandas.algos as algos
import pandas.hashtable as htable
from pandas.compat import string_types
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
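    >>> # illustrative example, not part of the original docstring
    >>> match(['b', 'c', 'a'], ['a', 'b', 'c'])
    array([1, 2, 0])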
Returns
-------
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
if issubclass(values.dtype.type, string_types):
values = np.array(values, dtype='O')
f = lambda htype, caster: _match_generic(to_match, values, htype, caster)
result = _hashtable_algo(f, values.dtype, np.int64)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas.core.series import Series
result = Series(result.ravel()).replace(-1,na_sentinel).values.reshape(result.shape)
return result
def unique(values):
"""
Compute unique values (not necessarily sorted) efficiently from input array
of values
Parameters
----------
values : array-like
Returns
-------
uniques
"""
values = com._asarray_tuplesafe(values)
f = lambda htype, caster: _unique_generic(values, htype, caster)
return _hashtable_algo(f, values.dtype)
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not com.is_list_like(comps):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(comps).__name__))
comps = np.asarray(comps)
if not com.is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(values).__name__))
# GH11232
    # work-around for numpy < 1.8 and comparisons on py3
# faster for larger cases to use np.in1d
if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
f = lambda x, y: np.in1d(x,np.asarray(list(y)))
else:
f = lambda x, y: lib.ismember_int64(x,set(y))
# may need i8 conversion for proper membership testing
if com.is_datetime64_dtype(comps):
from pandas.tseries.tools import to_datetime
values = to_datetime(values)._values.view('i8')
comps = comps.view('i8')
elif com.is_timedelta64_dtype(comps):
from pandas.tseries.timedeltas import to_timedelta
values = to_timedelta(values)._values.view('i8')
comps = comps.view('i8')
elif com.is_int64_dtype(comps):
pass
else:
f = lambda x, y: lib.ismember(x, set(values))
return f(comps, values)
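# Illustrative example (sketch):
#   isin(np.array([1, 2, 3]), [2, 4])  ->  array([False,  True, False])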
def _hashtable_algo(f, dtype, return_dtype=None):
"""
f(HashTable, type_caster) -> result
"""
if com.is_float_dtype(dtype):
return f(htable.Float64HashTable, com._ensure_float64)
elif com.is_integer_dtype(dtype):
return f(htable.Int64HashTable, com._ensure_int64)
elif com.is_datetime64_dtype(dtype):
return_dtype = return_dtype or 'M8[ns]'
return f(htable.Int64HashTable, com._ensure_int64).view(return_dtype)
elif com.is_timedelta64_dtype(dtype):
return_dtype = return_dtype or 'm8[ns]'
return f(htable.Int64HashTable, com._ensure_int64).view(return_dtype)
else:
return f(htable.PyObjectHashTable, com._ensure_object)
def _match_generic(values, index, table_type, type_caster):
values = type_caster(values)
index = type_caster(index)
table = table_type(min(len(index), 1000000))
table.map_locations(index)
return table.lookup(values)
def _unique_generic(values, table_type, type_caster):
values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques = table.unique(values)
return type_caster(uniques)
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
"""
Encode input values as an enumerated type or categorical variable
Parameters
----------
values : ndarray (1-d)
Sequence
sort : boolean, default False
Sort by values
order : deprecated
na_sentinel : int, default -1
Value to mark "not found"
size_hint : hint to the hashtable sizer
Returns
-------
labels : the indexer to the original array
uniques : ndarray (1-d) or Index
the unique values. Index is returned when passed values is Index or Series
note: an array of Periods will ignore sort as it returns an always sorted PeriodIndex
"""
if order is not None:
msg = "order is deprecated. See https://github.com/pydata/pandas/issues/6926"
warn(msg, FutureWarning, stacklevel=2)
from pandas.core.index import Index
from pandas.core.series import Series
vals = np.asarray(values)
is_datetime = com.is_datetime64_dtype(vals)
is_timedelta = com.is_timedelta64_dtype(vals)
(hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)
table = hash_klass(size_hint or len(vals))
uniques = vec_klass()
labels = table.get_labels(vals, uniques, 0, na_sentinel)
labels = com._ensure_platform_int(labels)
uniques = uniques.to_array()
if sort and len(uniques) > 0:
try:
sorter = uniques.argsort()
except:
# unorderable in py3 if mixed str/int
t = hash_klass(len(uniques))
t.map_locations(com._ensure_object(uniques))
# order ints before strings
ordered = np.concatenate([
np.sort(np.array([ e for i, e in enumerate(uniques) if f(e) ],dtype=object)) for f in [ lambda x: not isinstance(x,string_types),
lambda x: isinstance(x,string_types) ]
])
sorter = com._ensure_platform_int(t.lookup(com._ensure_object(ordered)))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
labels = reverse_indexer.take(labels)
np.putmask(labels, mask, -1)
uniques = uniques.take(sorter)
if is_datetime:
uniques = uniques.astype('M8[ns]')
elif is_timedelta:
uniques = uniques.astype('m8[ns]')
if isinstance(values, Index):
uniques = values._shallow_copy(uniques, name=None)
elif isinstance(values, Series):
uniques = Index(uniques)
return labels, uniques
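# Illustrative example (sketch):
#   labels, uniques = factorize(np.array(['b', 'b', 'a', 'c', 'b']), sort=True)
#   labels   ->  array([1, 1, 0, 2, 1])
#   uniques  ->  array(['a', 'b', 'c'], dtype=object)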
def value_counts(values, sort=True, ascending=False, normalize=False,
bins=None, dropna=True):
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
normalize: boolean, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series
from pandas.tools.tile import cut
from pandas import Index, PeriodIndex, DatetimeIndex
name = getattr(values, 'name', None)
values = Series(values).values
if bins is not None:
try:
cat, bins = cut(values, bins, retbins=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
values = cat.codes
if com.is_categorical_dtype(values.dtype):
result = values.value_counts(dropna)
else:
dtype = values.dtype
is_period = com.is_period_arraylike(values)
is_datetimetz = com.is_datetimetz(values)
if com.is_datetime_or_timedelta_dtype(dtype) or is_period or is_datetimetz:
if is_period:
values = PeriodIndex(values)
elif is_datetimetz:
tz = getattr(values, 'tz', None)
values = DatetimeIndex(values).tz_localize(None)
values = values.view(np.int64)
keys, counts = htable.value_count_scalar64(values, dropna)
if dropna:
from pandas.tslib import iNaT
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
# localize to the original tz if necessary
if is_datetimetz:
keys = DatetimeIndex(keys).tz_localize(tz)
# convert the keys back to the dtype we came in
else:
keys = keys.astype(dtype)
elif com.is_integer_dtype(dtype):
values = com._ensure_int64(values)
keys, counts = htable.value_count_scalar64(values, dropna)
elif com.is_float_dtype(dtype):
values = com._ensure_float64(values)
keys, counts = htable.value_count_scalar64(values, dropna)
else:
values = com._ensure_object(values)
mask = com.isnull(values)
keys, counts = htable.value_count_object(values, mask)
if not dropna and mask.any():
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
if bins is not None:
# TODO: This next line should be more efficient
result = result.reindex(np.arange(len(cat.categories)), fill_value=0)
result.index = bins[:-1]
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(values.size)
return result
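# Illustrative example (sketch):
#   value_counts(np.array([3, 3, 3, 1, 1, 2]))
#   ->  Series with index [3, 1, 2] and values [3, 2, 1] (sorted by count by default)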
def mode(values):
"""Returns the mode or mode(s) of the passed Series or ndarray (sorted)"""
# must sort because hash order isn't necessarily defined.
from pandas.core.series import Series
if isinstance(values, Series):
constructor = values._constructor
values = values.values
else:
values = np.asanyarray(values)
constructor = Series
dtype = values.dtype
if com.is_integer_dtype(values):
values = com._ensure_int64(values)
result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
dtype = values.dtype
values = values.view(np.int64)
result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)
elif com.is_categorical_dtype(values):
result = constructor(values.mode())
else:
mask = com.isnull(values)
values = com._ensure_object(values)
res = htable.mode_object(values, mask)
try:
res = sorted(res)
except TypeError as e:
warn("Unable to sort modes: %s" % e)
result = constructor(res, dtype=dtype)
return result
def rank(values, axis=0, method='average', na_option='keep',
ascending=True, pct=False):
"""
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option, pct=pct)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option, pct=pct)
return ranks
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only
    if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = com.isnull(x)
x = x[~mask]
values = np.sort(x)
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if np.isscalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_data_algo(values, func_map):
mask = None
if com.is_float_dtype(values):
f = func_map['float64']
values = com._ensure_float64(values)
elif com.needs_i8_conversion(values):
# if we have NaT, punt to object dtype
mask = com.isnull(values)
if mask.ravel().any():
f = func_map['generic']
values = com._ensure_object(values)
values[mask] = np.nan
else:
f = func_map['int64']
values = values.view('i8')
elif com.is_integer_dtype(values):
f = func_map['int64']
values = com._ensure_int64(values)
else:
f = func_map['generic']
values = com._ensure_object(values)
return f, values
def group_position(*args):
"""
Get group position
"""
from collections import defaultdict
table = defaultdict(int)
result = []
for tup in zip(*args):
result.append(table[tup])
table[tup] += 1
return result
_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'}
def _finalize_nsmallest(arr, kth_val, n, keep, narr):
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')][:n]
if keep == 'last':
# reverse indices
return narr - 1 - inds
else:
return inds
def nsmallest(arr, n, keep='first'):
'''
Find the indices of the n smallest values of a numpy array.
Note: Fails silently with NaN.
'''
if keep == 'last':
arr = arr[::-1]
narr = len(arr)
n = min(n, narr)
sdtype = str(arr.dtype)
arr = arr.view(_dtype_map.get(sdtype, sdtype))
kth_val = algos.kth_smallest(arr.copy(), n - 1)
return _finalize_nsmallest(arr, kth_val, n, keep, narr)
def nlargest(arr, n, keep='first'):
"""
Find the indices of the n largest values of a numpy array.
Note: Fails silently with NaN.
"""
sdtype = str(arr.dtype)
arr = arr.view(_dtype_map.get(sdtype, sdtype))
return nsmallest(-arr, n, keep=keep)
def select_n_slow(dropped, n, keep, method):
reverse_it = (keep == 'last' or method == 'nlargest')
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}
def select_n(series, n, keep, method):
"""Implement n largest/smallest.
Parameters
----------
n : int
keep : {'first', 'last'}, default 'first'
method : str, {'nlargest', 'nsmallest'}
Returns
-------
nordered : Series
"""
dtype = series.dtype
if not issubclass(dtype.type, (np.integer, np.floating, np.datetime64,
np.timedelta64)):
raise TypeError("Cannot use method %r with dtype %s" % (method, dtype))
if keep not in ('first', 'last'):
raise ValueError('keep must be either "first", "last"')
if n <= 0:
return series[[]]
dropped = series.dropna()
if n >= len(series):
return select_n_slow(dropped, n, keep, method)
inds = _select_methods[method](dropped.values, n, keep)
return dropped.iloc[inds]
_rank1d_functions = {
'float64': algos.rank_1d_float64,
'int64': algos.rank_1d_int64,
'generic': algos.rank_1d_generic
}
_rank2d_functions = {
'float64': algos.rank_2d_float64,
'int64': algos.rank_2d_int64,
'generic': algos.rank_2d_generic
}
_hashtables = {
'float64': (htable.Float64HashTable, htable.Float64Vector),
'int64': (htable.Int64HashTable, htable.Int64Vector),
'generic': (htable.PyObjectHashTable, htable.ObjectVector)
}
| gpl-2.0 |
jrdurrant/insect_analysis | vision/io_functions.py | 3 | 1853 | import csv
import glob
import os
import sys
import skimage.io
import matplotlib.pyplot as plt
import numpy as np
def read_image(filename, **kwargs):
return plt.imread(filename, **kwargs)[:, :, :3]
def write_image(filename, image, **kwargs):
if image.dtype.type == np.bool_:
image_out = 255 * image
else:
image_out = image
return skimage.io.imsave(filename, image_out, **kwargs)
def apply_all_images(input_folder, function, output_folder=None):
images = [image_file
for image_file
in os.listdir(input_folder)
if os.path.splitext(image_file)[1].lower() == '.jpg']
# Ignoring exceptions only for the sake of not interrupting during batch processing
for image_file in images:
if output_folder is not None:
try:
function(os.path.join(input_folder, image_file), output_folder)
except Exception:
sys.exc_clear()
else:
try:
function(os.path.join(input_folder, image_file))
except Exception:
sys.exc_clear()
def specimen_ids_from_images(filenames, prefix='color_'):
prefix_len = len(prefix)
for filename in filenames:
if filename.startswith(prefix):
yield filename[prefix_len:]
def get_specimen_ids(filename):
with open(filename, 'rU') as csvfile:
reader = csv.reader(csvfile)
specimen_ids = []
for row in reader:
sex = row[3]
if sex == 'male' or sex == 'female':
ids = glob.glob(os.path.join('data', 'full_image', sex, '*{}*'.format(row[0])))
if ids:
if len(ids) > 1:
ids = sorted(ids, key=len)
specimen_ids.append((row[0], ids[0]))
return specimen_ids
| gpl-2.0 |
toastedcornflakes/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
wasit7/book_pae | pae/final_code/src/convert_sub_home_name_tojson.py | 1 | 5762 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 01 21:37:20 2016
@author: Administrator
"""
import pandas as pd
import pandas.io.sql as pd_sql
import sqlite3 as sql
df_file_all = pd.read_csv('../data/CS_table_No2_No4_new.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False,encoding='utf8')
df_file_less = pd.read_csv('../data/df_dropSub_less20.csv',delimiter=",", skip_blank_lines = True,
error_bad_lines=False,encoding='utf8')
df_file_all = df_file_all.drop(['STUDENTID','ACADYEAR','CAMPUSID','SEMESTER','CURRIC','CAMPUSNAME','SECTIONGROUP','GRADE'],axis=1)
subjects = []
names = []
countSub = 0
subjects = []
link = []
out={}
sources=[]
targets=[]
for sub in df_file_less['3COURSEID']:
if sub not in subjects:
subjects.append(sub)
countSub = countSub+1
subjects.sort()
df_db = df_file_all[df_file_all["COURSEID"].isin(subjects)]
df_db = df_db.drop_duplicates(['COURSEID'], take_last=True)
df_db = df_db.sort(['COURSEID'])
#Create list of subject names
for name in df_db['COURSENAME']:
names.append(name)
subjects_home = []
node = []
cs = 'CS'
tu = 'TU'
el = 'EL'
for index in xrange(0,111):
s = subjects[index]
n = names[index]
if cs in s:
subjects_home.append(s)
node.append({"id":s,"name":n})
elif tu in s:
subjects_home.append(s)
node.append({"id":s,"name":n})
elif el in s:
subjects_home.append(s)
node.append({"id":s,"name":n})
subjects_home.remove("CS105")
node.pop(2)
subjects_home.remove("CS115")
node.pop(3)
subjects_home.remove("CS211")
node.pop(3)
subjects_home.remove("CS215")
node.pop(5)
subjects_home.remove("CS231")
node.pop(7)
subjects_home.remove("CS300")
node.pop(18)
subjects_home.append('CS112')
node.append({"id":'CS112',"name":'Introduction to Object-Oriented Programming'})
subjects_home.append('CS327')
node.append({"id":'CS327',"name":'Digital Logic Design'})
subjects_home.append('CS328')
node.append({"id":'CS328',"name":'Compiler Construction'})
subjects_home.append('CS357')
node.append({"id":'CS357',"name":'Electronic Business'})
subjects_home.append('CS358')
node.append({"id":'CS358',"name":'Computer Simulation and Forecasting Techniques in Business'})
subjects_home.append('CS359')
node.append({"id":'CS359',"name":'Document Indexing and Retrieval'})
subjects_home.append('CS389')
node.append({"id":'CS389',"name":'Software Architecture'})
subjects_home.append('CS406')
node.append({"id":'CS406',"name":'Selected Topics in Advance Sofware Engineering Technology'})
subjects_home.append('CS428')
node.append({"id":'CS428',"name":'Principles of Multiprocessors Programming'})
subjects_home.append('CS439')
node.append({"id":'CS439',"name":'Selected Topics in Programming Languages'})
subjects_home.append('CS447')
node.append({"id":'CS447',"name":'Operating Systems II'})
subjects_home.append('CS448')
node.append({"id":'CS448',"name":'Software systems for advanced distributed computing'})
subjects_home.append('CS458')
node.append({"id":'CS458',"name":'Information Systems for Entrepreneur Management'})
subjects_home.append('CS469')
node.append({"id":'CS469',"name":'Selected Topics in Artificial Intelligent Systems'})
subjects_home.append('CS479')
node.append({"id":'CS479',"name":'Selected Topics in Computer Interface and Multimedia'})
subjects_home.append('CS496')
node.append({"id":'CS496',"name":'Rendering II'})
subjects_home.append('CS497')
node.append({"id":'CS497',"name":'Real-time Graphics'})
subjects_home.append('CS499')
node.append({"id":'CS499',"name":'Selected Topics in Computer Graphics'})
subjects_home.append('TH161')
node.append({"id":'TH161',"name":'Thai Usage'})
subjects_home.append('PY228')
node.append({"id":'PY228',"name":'Psychology Of Interpersonal Relations'})
subjects_home.append('BA291')
node.append({"id":'BA291',"name":'Introduction Of Business'})
subjects_home.append('EC210')
node.append({"id":'EC210',"name":'Introductory Economics'})
subjects_home.append('HO201')
node.append({"id":'HO201',"name":'Principles Of Management'})
subjects_home.append('MA211')
node.append({"id":'MA211',"name":'Calculus 1'})
subjects_home.append('SC135')
node.append({"id":'SC135',"name":'General Physics'})
subjects_home.append('SC185')
node.append({"id":'SC185',"name":'General Physics Laboratory'})
subjects_home.append('SC123')
node.append({"id":'SC123',"name":'Fundamental Chemistry'})
subjects_home.append('SC173')
node.append({"id":'SC173',"name":'Fundamental Chemistry Laboratory'})
subjects_home.append('MA212')
node.append({"id":'MA212',"name":'Calculus 2'})
subjects_home.append('MA332')
node.append({"id":'MA332',"name":'Linear Algebra'})
subjects_home.append('ST216')
node.append({"id":'ST216',"name":'Statistics For Social Science Students 1'})
subjects_home.sort()
node.sort()
## Find index of source and target from book/graph1.gv
df_st = pd.read_csv('../data/source-target_home.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False)
headers_st=list(df_st.columns.values)
df_st = df_st.dropna()
for source in df_st[headers_st[0]]:
#print "source is %s, index is %d"%(source,subjects_db.index(source))
sources.append(subjects_home.index(source))
for target in df_st[headers_st[1]]:
#print "target is %s, index is %d"%(target,subjects_db.index(target))
targets.append(subjects_home.index(target))
for i in xrange(0,82): # original note: the Bachelor program has 83 links; this loop adds link indices 0-81
link.append({"source":sources[i],"target":targets[i],"type": "licensing"})
out["node"]=node
out["link"]=link
#with open("subjects_name.json","w") as outfile:
# json.dump(out,outfile,sort_keys=True, indent=4, separators=(',',': ')) | mit |
Ziqi-Li/bknqgis | bokeh/examples/app/crossfilter/main.py | 4 | 2214 | import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.autompg import autompg_clean as df
df = df.copy()
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
# data cleanup
df.cyl = df.cyl.astype(str)
df.yr = df.yr.astype(str)
del df['name']
columns = sorted(df.columns)
discrete = [x for x in columns if df[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
quantileable = [x for x in continuous if len(df[x].unique()) > 20]
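# Column split used by the widgets below: `discrete` columns get categorical
# axes, `continuous` ones numeric axes, and `quantileable` columns (continuous
# with more than 20 distinct values) can additionally be binned with pd.qcut to
# drive the Size and Color selectors.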
def create_figure():
xs = df[x.value].values
ys = df[y.value].values
x_title = x.value.title()
y_title = y.value.title()
kw = dict()
if x.value in discrete:
kw['x_range'] = sorted(set(xs))
if y.value in discrete:
kw['y_range'] = sorted(set(ys))
kw['title'] = "%s vs %s" % (x_title, y_title)
p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,reset', **kw)
p.xaxis.axis_label = x_title
p.yaxis.axis_label = y_title
if x.value in discrete:
p.xaxis.major_label_orientation = pd.np.pi / 4
sz = 9
if size.value != 'None':
groups = pd.qcut(df[size.value].values, len(SIZES))
sz = [SIZES[xx] for xx in groups.codes]
c = "#31AADE"
if color.value != 'None':
groups = pd.qcut(df[color.value].values, len(COLORS))
c = [COLORS[xx] for xx in groups.codes]
p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
return p
def update(attr, old, new):
layout.children[1] = create_figure()
x = Select(title='X-Axis', value='mpg', options=columns)
x.on_change('value', update)
y = Select(title='Y-Axis', value='hp', options=columns)
y.on_change('value', update)
size = Select(title='Size', value='None', options=['None'] + quantileable)
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] + quantileable)
color.on_change('value', update)
controls = widgetbox([x, y, color, size], width=200)
layout = row(controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "Crossfilter"
| gpl-2.0 |
rubikloud/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
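# Editorial note: an equivalent vectorized reconstruction would be
# codebook[labels].reshape(w, h, -1); the explicit loops are kept for clarity.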
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
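# Hedged addition (not in the original example): compare the mean squared
# reconstruction error of the two codebooks as a rough numerical quality check.
mse_kmeans = np.mean((china - recreate_image(kmeans.cluster_centers_, labels, w, h)) ** 2)
mse_random = np.mean((china - recreate_image(codebook_random, labels_random, w, h)) ** 2)
print("MSE with k-means codebook: %.6f" % mse_kmeans)
print("MSE with random codebook:  %.6f" % mse_random)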
| bsd-3-clause |
JT5D/scikit-learn | examples/applications/plot_out_of_core_classification.py | 2 | 13546 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import sgmllib
import tarfile
import time
import urllib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
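# Minimal sketch of the out-of-core pattern used below (hedged addition;
# `text_batches` stands for an assumed iterable of (texts, labels) mini-batches):
#
#     vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
#     clf = SGDClassifier()
#     for texts, labels in text_batches:
#         clf.partial_fit(vectorizer.transform(texts), labels,
#                         classes=np.array([0, 1]))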
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(sgmllib.SGMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self._reset()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk)
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename)):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
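# The hashing trick is stateless: no vocabulary has to be fitted up front, so
# every mini-batch is projected into the same fixed feature space;
# non_negative=True keeps the hashed values usable by MultinomialNB.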
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [('{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the iterator that yields mini-batches of documents parsed from the
# Reuters SGML stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
zihua/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 23 | 17698 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
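# (note: `perm` above is computed but not used; `shuffle` already permutes the
# data through the same random state)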
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
ioam/holoviews | holoviews/plotting/plotly/element.py | 2 | 18826 | from __future__ import absolute_import, division, unicode_literals
import numpy as np
import param
from ...core import util
from ...core.element import Element
from ...core.spaces import DynamicMap
from ...util.transform import dim
from ..plot import GenericElementPlot, GenericOverlayPlot
from ..util import dim_range_key, dynamic_update
from .plot import PlotlyPlot
from .util import STYLE_ALIASES, get_colorscale, merge_figure
class ElementPlot(PlotlyPlot, GenericElementPlot):
aspect = param.Parameter(default='cube', doc="""
The aspect ratio mode of the plot. By default, a plot may
select its own appropriate aspect ratio but sometimes it may
be necessary to force a square aspect ratio (e.g. to display
the plot as an element of a grid). The modes 'auto' and
'equal' correspond to the axis modes of the same name in
matplotlib, a numeric value may also be passed.""")
bgcolor = param.ClassSelector(class_=(str, tuple), default=None, doc="""
If set bgcolor overrides the background color of the axis.""")
invert_axes = param.ObjectSelector(default=False, doc="""
Inverts the axes of the plot. Note that this parameter may not
always be respected by all plots but should be respected by
adjoined plots when appropriate.""")
invert_xaxis = param.Boolean(default=False, doc="""
Whether to invert the plot x-axis.""")
invert_yaxis = param.Boolean(default=False, doc="""
Whether to invert the plot y-axis.""")
invert_zaxis = param.Boolean(default=False, doc="""
Whether to invert the plot z-axis.""")
labelled = param.List(default=['x', 'y', 'z'], doc="""
Whether to label the 'x' and 'y' axes.""")
logx = param.Boolean(default=False, doc="""
Whether to apply log scaling to the x-axis of the Chart.""")
logy = param.Boolean(default=False, doc="""
Whether to apply log scaling to the y-axis of the Chart.""")
logz = param.Boolean(default=False, doc="""
        Whether to apply log scaling to the z-axis of the Chart.""")
margins = param.NumericTuple(default=(50, 50, 50, 50), doc="""
Margins in pixel values specified as a tuple of the form
(left, bottom, right, top).""")
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
xaxis = param.ObjectSelector(default='bottom',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
xticks = param.Parameter(default=None, doc="""
Ticks along x-axis specified as an integer, explicit list of
tick locations, list of tuples containing the locations.""")
yaxis = param.ObjectSelector(default='left',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
        'right', 'bare', 'left-bare' and 'right-bare'.""")
yticks = param.Parameter(default=None, doc="""
Ticks along y-axis specified as an integer, explicit list of
tick locations, list of tuples containing the locations.""")
zlabel = param.String(default=None, doc="""
An explicit override of the z-axis label, if set takes precedence
over the dimension label.""")
zticks = param.Parameter(default=None, doc="""
Ticks along z-axis specified as an integer, explicit list of
tick locations, list of tuples containing the locations.""")
trace_kwargs = {}
_style_key = None
# Whether vectorized styles are applied per trace
_per_trace = False
# Declare which styles cannot be mapped to a non-scalar dimension
_nonvectorized_styles = []
def initialize_plot(self, ranges=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
fig = self.generate_plot(self.keys[-1], ranges)
self.drawn = True
return fig
def generate_plot(self, key, ranges, element=None):
if element is None:
element = self._get_frame(key)
if element is None:
return self.handles['fig']
# Set plot options
plot_opts = self.lookup_options(element, 'plot').options
self.param.set_param(**{k: v for k, v in plot_opts.items()
if k in self.params()})
# Get ranges
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = util.match_spec(element, ranges)
# Get style
self.style = self.lookup_options(element, 'style')
style = self.style[self.cyclic_index]
# Get data and options and merge them
data = self.get_data(element, ranges, style)
opts = self.graph_options(element, ranges, style)
graphs = []
for i, d in enumerate(data):
# Initialize graph
graph = self.init_graph(d, opts, index=i)
graphs.append(graph)
self.handles['graphs'] = graphs
# Initialize layout
layout = self.init_layout(key, element, ranges)
self.handles['layout'] = layout
# Create figure and return it
self.drawn = True
fig = dict(data=graphs, layout=layout)
self.handles['fig'] = fig
return fig
def graph_options(self, element, ranges, style):
if self.overlay_dims:
legend = ', '.join([d.pprint_value_string(v) for d, v in
self.overlay_dims.items()])
else:
legend = element.label
opts = dict(
showlegend=self.show_legend, legendgroup=element.group,
name=legend, **self.trace_kwargs)
if self._style_key is not None:
styles = self._apply_transforms(element, ranges, style)
opts[self._style_key] = {STYLE_ALIASES.get(k, k): v
for k, v in styles.items()}
else:
opts.update({STYLE_ALIASES.get(k, k): v
for k, v in style.items() if k != 'cmap'})
return opts
def init_graph(self, data, options, index=0):
trace = dict(options)
for k, v in data.items():
if k in trace and isinstance(trace[k], dict):
trace[k].update(v)
else:
trace[k] = v
if self._style_key and self._per_trace:
vectorized = {k: v for k, v in options[self._style_key].items()
if isinstance(v, np.ndarray)}
trace[self._style_key] = dict(trace[self._style_key])
for s, val in vectorized.items():
trace[self._style_key][s] = val[index]
return trace
def get_data(self, element, ranges, style):
return []
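    # Subclasses override get_data to return a list of per-trace data dicts;
    # init_graph then merges each of them with the options built by
    # graph_options.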
def get_aspect(self, xspan, yspan):
"""
Computes the aspect ratio of the plot
"""
return self.width/self.height
def _get_axis_dims(self, element):
"""Returns the dimensions corresponding to each axis.
Should return a list of dimensions or list of lists of
dimensions, which will be formatted to label the axis
and to link axes.
"""
dims = element.dimensions()[:3]
pad = [None]*max(3-len(dims), 0)
return dims + pad
def _apply_transforms(self, element, ranges, style):
new_style = dict(style)
for k, v in dict(style).items():
if isinstance(v, util.basestring):
if k == 'marker' and v in 'xsdo':
continue
elif v in element:
v = dim(v)
elif any(d==v for d in self.overlay_dims):
v = dim([d for d in self.overlay_dims if d==v][0])
if not isinstance(v, dim):
continue
elif (not v.applies(element) and v.dimension not in self.overlay_dims):
new_style.pop(k)
self.warning('Specified %s dim transform %r could not be applied, as not all '
'dimensions could be resolved.' % (k, v))
continue
if len(v.ops) == 0 and v.dimension in self.overlay_dims:
val = self.overlay_dims[v.dimension]
else:
val = v.apply(element, ranges=ranges, flat=True)
if (not util.isscalar(val) and len(util.unique_array(val)) == 1
and not 'color' in k):
val = val[0]
if not util.isscalar(val):
if k in self._nonvectorized_styles:
element = type(element).__name__
raise ValueError('Mapping a dimension to the "{style}" '
'style option is not supported by the '
'{element} element using the {backend} '
'backend. To map the "{dim}" dimension '
'to the {style} use a groupby operation '
'to overlay your data along the dimension.'.format(
style=k, dim=v.dimension, element=element,
backend=self.renderer.backend))
# If color is not valid colorspec add colormapper
numeric = isinstance(val, np.ndarray) and val.dtype.kind in 'uifMm'
if ('color' in k and isinstance(val, np.ndarray) and numeric):
copts = self.get_color_opts(v, element, ranges, style)
new_style.pop('cmap', None)
new_style.update(copts)
new_style[k] = val
return new_style
def init_layout(self, key, element, ranges):
el = element.traverse(lambda x: x, [Element])
el = el[0] if el else element
extent = self.get_extents(element, ranges)
if len(extent) == 4:
l, b, r, t = extent
else:
l, b, z0, r, t, z1 = extent
options = {}
dims = self._get_axis_dims(el)
if len(dims) > 2:
xdim, ydim, zdim = dims
else:
xdim, ydim = dims
zdim = None
xlabel, ylabel, zlabel = self._get_axis_labels(dims)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
ydim, xdim = xdim, ydim
l, b, r, t = b, l, t, r
if 'x' not in self.labelled:
xlabel = ''
if 'y' not in self.labelled:
ylabel = ''
if 'z' not in self.labelled:
zlabel = ''
if xdim:
xrange = [r, l] if self.invert_xaxis else [l, r]
xaxis = dict(range=xrange, title=xlabel)
if self.logx:
xaxis['type'] = 'log'
self._get_ticks(xaxis, self.xticks)
else:
xaxis = {}
if ydim:
yrange = [t, b] if self.invert_yaxis else [b, t]
yaxis = dict(range=yrange, title=ylabel)
if self.logy:
yaxis['type'] = 'log'
self._get_ticks(yaxis, self.yticks)
else:
yaxis = {}
if self.projection == '3d':
scene = dict(xaxis=xaxis, yaxis=yaxis)
if zdim:
zrange = [z1, z0] if self.invert_zaxis else [z0, z1]
zaxis = dict(range=zrange, title=zlabel)
if self.logz:
zaxis['type'] = 'log'
self._get_ticks(zaxis, self.zticks)
scene['zaxis'] = zaxis
if self.aspect == 'cube':
scene['aspectmode'] = 'cube'
else:
scene['aspectmode'] = 'manual'
scene['aspectratio'] = self.aspect
options['scene'] = scene
else:
l, b, r, t = self.margins
options['xaxis'] = xaxis
options['yaxis'] = yaxis
options['margin'] = dict(l=l, r=r, b=b, t=t, pad=4)
return dict(width=self.width, height=self.height,
title=self._format_title(key, separator=' '),
plot_bgcolor=self.bgcolor, **options)
def _get_ticks(self, axis, ticker):
axis_props = {}
if isinstance(ticker, (tuple, list)):
if all(isinstance(t, tuple) for t in ticker):
ticks, labels = zip(*ticker)
labels = [l if isinstance(l, util.basestring) else str(l)
for l in labels]
axis_props['tickvals'] = ticks
axis_props['ticktext'] = labels
else:
axis_props['tickvals'] = ticker
axis.update(axis_props)
def update_frame(self, key, ranges=None, element=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
self.generate_plot(key, ranges, element)
class ColorbarPlot(ElementPlot):
clim = param.NumericTuple(default=(np.nan, np.nan), length=2, doc="""
User-specified colorbar axis range limits for the plot, as a tuple (low,high).
If specified, takes precedence over data and dimension ranges.""")
colorbar = param.Boolean(default=False, doc="""
Whether to display a colorbar.""")
color_levels = param.ClassSelector(default=None, class_=(int, list), doc="""
Number of discrete colors to use when colormapping or a set of color
intervals defining the range of values to map each color to.""")
colorbar_opts = param.Dict(default={}, doc="""
        Allows setting colorbar options including borderwidth, showexponent, nticks,
outlinecolor, thickness, bgcolor, outlinewidth, bordercolor,
ticklen, xpad, ypad, tickangle...""")
symmetric = param.Boolean(default=False, doc="""
Whether to make the colormap symmetric around zero.""")
def get_color_opts(self, eldim, element, ranges, style):
opts = {}
dim_name = dim_range_key(eldim)
if self.colorbar:
if isinstance(eldim, dim):
title = str(eldim) if eldim.ops else str(eldim)[1:-1]
else:
title = eldim.pprint_label
opts['colorbar'] = dict(title=title, **self.colorbar_opts)
else:
opts['showscale'] = False
if eldim:
auto = False
if util.isfinite(self.clim).all():
cmin, cmax = self.clim
elif dim_name in ranges:
cmin, cmax = ranges[dim_name]['combined']
elif isinstance(eldim, dim):
cmin, cmax = np.nan, np.nan
auto = True
else:
cmin, cmax = element.range(dim_name)
if self.symmetric:
cabs = np.abs([cmin, cmax])
cmin, cmax = -cabs.max(), cabs.max()
else:
auto = True
cmin, cmax = None, None
cmap = style.pop('cmap', 'viridis')
colorscale = get_colorscale(cmap, self.color_levels, cmin, cmax)
if cmin is not None:
opts['cmin'] = cmin
if cmax is not None:
opts['cmax'] = cmax
opts['cauto'] = auto
opts['colorscale'] = colorscale
return opts
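# Hedged usage sketch (addition, not part of the module): a style option can be
# mapped to a dimension with a `dim` transform, e.g.
#     hv.Scatter(data, 'x', ['y', 'z']).options(color=dim('z'), cmap='viridis')
# _apply_transforms resolves such transforms and get_color_opts above turns the
# resulting numeric array into a plotly colorscale (plus optional colorbar).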
class OverlayPlot(GenericOverlayPlot, ElementPlot):
_propagate_options = [
'width', 'height', 'xaxis', 'yaxis', 'labelled', 'bgcolor',
'invert_axes', 'show_frame', 'show_grid', 'logx', 'logy',
'xticks', 'toolbar', 'yticks', 'xrotation', 'yrotation',
'invert_xaxis', 'invert_yaxis', 'sizing_mode', 'title', 'title_format',
'padding', 'xlabel', 'ylabel', 'zlabel', 'xlim', 'ylim', 'zlim']
def initialize_plot(self, ranges=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
return self.generate_plot(list(self.hmap.data.keys())[0], ranges)
def generate_plot(self, key, ranges, element=None):
if element is None:
element = self._get_frame(key)
items = [] if element is None else list(element.data.items())
# Update plot options
plot_opts = self.lookup_options(element, 'plot').options
inherited = self._traverse_options(element, 'plot',
self._propagate_options,
defaults=False)
plot_opts.update(**{k: v[0] for k, v in inherited.items() if k not in plot_opts})
self.set_param(**plot_opts)
ranges = self.compute_ranges(self.hmap, key, ranges)
figure = None
for okey, subplot in self.subplots.items():
if element is not None and subplot.drawn:
idx, spec, exact = dynamic_update(self, subplot, okey, element, items)
if idx is not None:
_, el = items.pop(idx)
else:
el = None
else:
el = None
fig = subplot.generate_plot(key, ranges, el)
if figure is None:
figure = fig
else:
merge_figure(figure, fig)
layout = self.init_layout(key, element, ranges)
figure['layout'].update(layout)
self.drawn = True
self.handles['fig'] = figure
return figure
def update_frame(self, key, ranges=None, element=None):
reused = isinstance(self.hmap, DynamicMap) and self.overlaid
if not reused and element is None:
element = self._get_frame(key)
elif element is not None:
self.current_frame = element
self.current_key = key
items = [] if element is None else list(element.data.items())
# Instantiate dynamically added subplots
for k, subplot in self.subplots.items():
# If in Dynamic mode propagate elements to subplots
if not (isinstance(self.hmap, DynamicMap) and element is not None):
continue
idx, _, _ = dynamic_update(self, subplot, k, element, items)
if idx is not None:
items.pop(idx)
if isinstance(self.hmap, DynamicMap) and items:
self._create_dynamic_subplots(key, items, ranges)
self.generate_plot(key, ranges, element)
| bsd-3-clause |
CSCfi/antero | pdi_integrations/arvo/python_scripts/get_arvo_vastaukset.py | 1 | 5436 | import requests
import os
from pandas.io.json import json_normalize
import base64
## import API key, API user and base URL from Jenkins environment variables
try:
api_key = os.environ['AUTH_API_KEY']
api_user = os.environ['AUTH_API_USER']
base_url = os.environ['BASE_URL']
env = os.environ['ENV']
except KeyError:
print("One or more Jenkins variables are missing. Cannot continue ETL-job.")
exit(1)
vastaukset=[]
urls = []
url = "https://"+base_url+"/api/export/v1/vastaukset?limit=50000"
reqheaders = {'Content-Type': 'application/json'}
reqheaders['Accept'] = 'application/json'
csv_path = "d:/pdi_integrations/data/arvo/vastaukset.csv"
### encode API user and API key into the request headers
tmp = "%s:%s" % (api_user, api_key)
reqheaders['Authorization'] = "Basic %s" % base64.b64encode(tmp.encode('utf-8')).decode('utf-8')
# set the username and initialize the counter i, which doubles as the id column value
username = os.environ['USERNAME']
i = 1
def keycheck(x,y):
if x in y:
return y[x]
else:
return None
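# makerow_vastaukset returns a fresh answer-row template with every field preset
# to None (plus the constant source URL and username), so keys missing from the
# API response simply stay empty.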
def makerow_vastaukset():
return {
"id":None,
"vastausid": None,
"monivalintavaihtoehto_fi": None,
"monivalintavaihtoehto_sv": None,
"monivalintavaihtoehto_en": None,
"vastaajaid": None,
"kysymysid": None,
"kyselykertaid": None,
"koulutustoimija": None,
"numerovalinta": None,
"kyselyid": None,
"vastaajatunnusid": None,
"vapaateksti": None,
"vaihtoehto": None,
"vastausaika": None,
"source": url,
"loadtime": None,
"username": username
}
while url is not None:  # follow pagination until 'next_url' comes back as None
response = requests.get(url, headers=reqheaders).json()
for vastaus in response['data']:
row = makerow_vastaukset()
row["id"] = i
row["vastausid"] = keycheck("vastausid",vastaus)
row["monivalintavaihtoehto_fi"] = keycheck("monivalintavaihtoehto_fi",vastaus)
row["monivalintavaihtoehto_sv"] = keycheck("monivalintavaihtoehto_sv",vastaus)
row["monivalintavaihtoehto_en"] = keycheck("monivalintavaihtoehto_en",vastaus)
row["vastaajaid"] = keycheck("vastaajaid",vastaus)
row["kysymysid"] = keycheck("kysymysid",vastaus)
row["kyselykertaid"] = keycheck("kyselykertaid",vastaus)
row["koulutustoimija"] = keycheck("koulutustoimija",vastaus)
#row["numerovalinta"] = keycheck("numerovalinta",vastaus)
if "numerovalinta" in vastaus:
if vastaus["numerovalinta"] == None:
row["numerovalinta"] = -1
else:
row["numerovalinta"] = vastaus["numerovalinta"]
else:
row["numerovalinta"] = -1
row["kyselyid"] = keycheck("kyselyid",vastaus)
row["vastaajatunnusid"] = keycheck("vastaajatunnusid",vastaus)
row["vapaateksti"] = keycheck("vapaateksti",vastaus)
row["vaihtoehto"] = keycheck("vaihtoehto",vastaus)
row["vastausaika"] = keycheck("vastausaika",vastaus)
vastaukset.append(row)
        # write the row buffer to csv; the first pass (i == 1) writes the header, later passes append
# DATA to csv for import to MSSQL - can be used also for BULK inserting
if i == 1:
data = json_normalize(vastaukset)
data.to_csv(path_or_buf=csv_path, sep='╡', na_rep='',
header=True, index=False, mode='w', encoding='utf-8', quoting=0,
quotechar='"', line_terminator='\n' , escapechar='$', columns = ["id",'vastausid',
'monivalintavaihtoehto_fi', 'monivalintavaihtoehto_sv','monivalintavaihtoehto_en','vastaajaid','kysymysid',
'kyselykertaid','koulutustoimija','numerovalinta','kyselyid','vastaajatunnusid','vapaateksti','loadtime','source',
'username','vaihtoehto', 'vastausaika'])
i+= 1
#add chunk of vastaus to data and then csv
else:
i+= 1
# DATA to csv for import to MSSQL - can be used also for BULK inserting
data = json_normalize(vastaukset)
data.to_csv(path_or_buf=csv_path, sep='╡', na_rep='',
header=False, index=False, mode='a', encoding='utf-8', quoting=0,
quotechar='"', line_terminator='\n' , escapechar='$', columns = ["id",'vastausid',
'monivalintavaihtoehto_fi', 'monivalintavaihtoehto_sv','monivalintavaihtoehto_en','vastaajaid','kysymysid',
'kyselykertaid','koulutustoimija','numerovalinta','kyselyid','vastaajatunnusid','vapaateksti','loadtime','source',
'username','vaihtoehto', 'vastausaika'])
#for debugging
#print (i-1, " rows exported to csv" )
#reset vastaukset
vastaukset= []
url = response['pagination']['next_url']
urls.append(url)
print (i-1, " rows exported to csv" )
print ("The End")
'''
#append the rest of vastauset into csv
data = json_normalize(vastaukset)
# DATA to csv for import to MSSQL - can be used also for BULK inserting
data.to_csv(path_or_buf=csv_path, sep='|', na_rep='',
header=False, index=False, mode='a', encoding='utf-8', quoting=0,
quotechar='"', line_terminator='\n' , escapechar='$', columns = ["id",'vastausid',
'monivalintavaihtoehto_fi', 'monivalintavaihtoehto_sv','monivalintavaihtoehto_en','vastaajaid','kysymysid',
'kyselykertaid','koulutustoimija','numerovalinta','kyselyid','vastaajatunnusid','vapaateksti','loadtime','source',
'username','vaihtoehto', 'vastausaika'])
''' | mit |
naripok/cryptotrader | cryptotrader/datafeed.py | 1 | 28137 | from functools import wraps as _wraps
from itertools import chain as _chain
import json
from .utils import convert_to, Logger, dec_con
from decimal import Decimal
import pandas as pd
from time import sleep
from datetime import datetime, timezone, timedelta
import zmq
import threading
from multiprocessing import Process
from .exceptions import *
from cryptotrader.utils import send_email
debug = True
# Base classes
class ExchangeConnection(object):
# Feed methods
@property
def balance(self):
return NotImplementedError("This class is not intended to be used directly.")
def returnBalances(self):
return NotImplementedError("This class is not intended to be used directly.")
def returnFeeInfo(self):
return NotImplementedError("This class is not intended to be used directly.")
def returnCurrencies(self):
return NotImplementedError("This class is not intended to be used directly.")
def returnChartData(self, currencyPair, period, start=None, end=None):
return NotImplementedError("This class is not intended to be used directly.")
# Trade execution methods
def sell(self, currencyPair, rate, amount, orderType=False):
return NotImplementedError("This class is not intended to be used directly.")
def buy(self, currencyPair, rate, amount, orderType=False):
return NotImplementedError("This class is not intended to be used directly.")
def pair_reciprocal(self, df):
df[['open', 'high', 'low', 'close']] = df.apply(
{col: lambda x: str((Decimal('1') / convert_to.decimal(x)).quantize(Decimal('0E-8')))
for col in ['open', 'low', 'high', 'close']}, raw=True).rename(columns={'low': 'high',
'high': 'low'}
)
return df.rename(columns={'quoteVolume': 'volume', 'volume': 'quoteVolume'})
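    # pair_reciprocal re-quotes a candle DataFrame from pair A/B to B/A: prices
    # are inverted (1/x), which swaps the roles of high and low, and the base
    # and quote volumes switch places, hence the column renames above.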
## Feed daemon
# Server
class FeedDaemon(Process):
"""
Data Feed server
"""
def __init__(self, api={}, addr='ipc:///tmp/feed.ipc', n_workers=8, email={}):
"""
:param api: dict: exchange name: api instance
:param addr: str: client side address
:param n_workers: int: n threads
"""
super(FeedDaemon, self).__init__()
self.api = api
self.email = email
self.context = zmq.Context()
self.n_workers = n_workers
self.addr = addr
self.MINUTE, self.HOUR, self.DAY = 60, 60 * 60, 60 * 60 * 24
self.WEEK, self.MONTH = self.DAY * 7, self.DAY * 30
self.YEAR = self.DAY * 365
self._nonce = int("{:.6f}".format(datetime.utcnow().timestamp()).replace('.', ''))
@property
def nonce(self):
""" Increments the nonce"""
self._nonce += 33
return self._nonce
def handle_req(self, req):
req = req.split(' ')
if req[0] == '' or len(req) == 1:
return False
elif len(req) == 2:
return req[0], req[1]
else:
# Candle data
if req[1] == 'returnChartData':
if req[4] == 'None':
req[4] = datetime.utcnow().timestamp() - self.DAY
if req[5] == 'None':
req[5] = datetime.utcnow().timestamp()
call = (
req[0],
req[1],
{
'currencyPair': str(req[2]).upper(),
'period': str(req[3]),
'start': str(req[4]),
'end': str(req[5])
}
)
return call
if req[1] == 'returnTradeHistory':
args = {'currencyPair': str(req[2]).upper()}
if req[3] != 'None':
args['start'] = req[3]
if req[4] != 'None':
args['end'] = req[4]
return req[0], req[1], args
# Buy and sell orders
if req[1] == 'buy' or req[1] == 'sell':
args = {
'currencyPair': str(req[2]).upper(),
'rate': str(req[3]),
'amount': str(req[4]),
}
# order type specified?
try:
possTypes = ['fillOrKill', 'immediateOrCancel', 'postOnly']
# check type
if not req[5] in possTypes:
raise ExchangeError('Invalid orderType')
args[req[5]] = 1
except IndexError:
pass
return req[0], req[1], args
if req[1] == 'returnDepositsWithdrawals':
args = {}
if req[2] != 'None':
args['start'] = req[2]
if req[3] != 'None':
args['end'] = req[3]
return req[0], req[1], args
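    # Example of the whitespace-separated request strings handle_req() expects
    # (illustrative only; the exchange and pair names below are placeholders):
    #
    #   "poloniex returnTicker"
    #   "poloniex returnChartData USDT_BTC 300 None None"
    #   "poloniex returnTradeHistory USDT_BTC None None"
    #   "poloniex buy USDT_BTC 10000.0 0.01 fillOrKill"
    #
    # The first token selects the api instance, the second the method name, and
    # the remaining tokens are positional arguments ('None' means "not supplied").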
def worker(self):
# Init socket
sock = self.context.socket(zmq.REP)
sock.connect("inproc://workers.inproc")
while True:
try:
# Wait for request
req = sock.recv_string()
Logger.info(FeedDaemon.worker, req)
# Handle request
call = self.handle_req(req)
# Send request to api
if call:
try:
self.api[call[0]].nonce = self.nonce
rep = self.api[call[0]].__call__(*call[1:])
except ExchangeError as e:
rep = e.__str__()
Logger.error(FeedDaemon.worker, "Exchange error: %s\n%s" % (req, rep))
except DataFeedException as e:
rep = e.__str__()
Logger.error(FeedDaemon.worker, "DataFeedException: %s\n%s" % (req, rep))
if debug:
Logger.debug(FeedDaemon.worker, "Debug: %s" % req)
# send reply back to client
sock.send_json(rep)
else:
raise TypeError("Bad call format.")
except Exception as e:
send_email(self.email, "FeedDaemon Error", e)
sock.close()
raise e
def run(self):
try:
Logger.info(FeedDaemon, "Starting Feed Daemon...")
# Socket to talk to clients
clients = self.context.socket(zmq.ROUTER)
clients.bind(self.addr)
# Socket to talk to workers
workers = self.context.socket(zmq.DEALER)
workers.bind("inproc://workers.inproc")
# Launch pool of worker threads
for i in range(self.n_workers):
thread = threading.Thread(target=self.worker, args=())
thread.start()
Logger.info(FeedDaemon.run, "Feed Daemon running. Serving on %s" % self.addr)
zmq.proxy(clients, workers)
except KeyboardInterrupt:
clients.close()
workers.close()
self.context.term()
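# Minimal launch sketch (assumption: `PoloniexApi` stands for any api object that
# exposes the called methods and a writable `nonce` attribute; it is not defined
# in this module):
#
#   if __name__ == '__main__':
#       api = {'poloniex': PoloniexApi(key, secret)}
#       daemon = FeedDaemon(api=api, addr='ipc:///tmp/feed.ipc', n_workers=8)
#       daemon.start()  # FeedDaemon is a multiprocessing.Process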
# Client
class DataFeed(ExchangeConnection):
"""
    Live data feed client. Sends requests to a running FeedDaemon over ZMQ.
"""
# TODO WRITE TESTS
retryDelays = [2 ** i for i in range(8)]
def __init__(self, exchange='', addr='ipc:///tmp/feed.ipc', timeout=30):
"""
        :param exchange: str: FeedDaemon exchange to query
        :param addr: str: Client socket address
        :param timeout: int: Request timeout in seconds
"""
super(DataFeed, self).__init__()
# Sock objects
self.context = zmq.Context()
self.addr = addr
self.exchange = exchange
self.timeout = timeout * 1000
self.sock = self.context.socket(zmq.REQ)
self.sock.connect(addr)
self.poll = zmq.Poller()
self.poll.register(self.sock, zmq.POLLIN)
def __del__(self):
self.sock.close()
# Retry decorator
def retry(func):
""" Retry decorator """
@_wraps(func)
def retrying(*args, **kwargs):
problems = []
for delay in _chain(DataFeed.retryDelays, [None]):
try:
# attempt call
return func(*args, **kwargs)
# we need to try again
except DataFeedException as problem:
problems.append(problem)
if delay is None:
Logger.debug(DataFeed, problems)
raise MaxRetriesException('retryDelays exhausted ' + str(problem))
else:
# log exception and wait
Logger.debug(DataFeed, problem)
Logger.error(DataFeed, "No reply... -- delaying for %ds" % delay)
sleep(delay)
return retrying
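    # Note on the retry schedule: retryDelays = [2 ** i for i in range(8)] gives
    # waits of 1, 2, 4, ..., 128 seconds, so a call is attempted up to 9 times
    # before MaxRetriesException is raised. Only DataFeedException is caught and
    # retried; any other exception propagates immediately.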
def get_response(self, req):
req = self.exchange + ' ' + req
# Send request
try:
self.sock.send_string(req)
except zmq.ZMQError as e:
if 'Operation cannot be accomplished in current state' == e.__str__():
# If request timeout, restart socket
Logger.error(DataFeed.get_response, "%s request timeout." % req)
# Socket is confused. Close and remove it.
self.sock.setsockopt(zmq.LINGER, 0)
self.sock.close()
self.poll.unregister(self.sock)
# Create new connection
self.sock = self.context.socket(zmq.REQ)
self.sock.connect(self.addr)
self.poll.register(self.sock, zmq.POLLIN)
raise DataFeedException("Socket error. Restarting connection...")
# Get response
socks = dict(self.poll.poll(self.timeout))
if socks.get(self.sock) == zmq.POLLIN:
# If response, return
return self.sock.recv_json()
else:
# If request timeout, restart socket
Logger.error(DataFeed.get_response, "%s request timeout." % req)
# Socket is confused. Close and remove it.
self.sock.setsockopt(zmq.LINGER, 0)
self.sock.close()
self.poll.unregister(self.sock)
# Create new connection
self.sock = self.context.socket(zmq.REQ)
self.sock.connect(self.addr)
self.poll.register(self.sock, zmq.POLLIN)
            raise RequestTimeoutException("%s request timed out" % req)
@retry
def returnTicker(self):
try:
rep = self.get_response('returnTicker')
assert isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnTicker")
@retry
def returnBalances(self):
"""
Return balance from exchange. API KEYS NEEDED!
        :return: dict:
"""
try:
rep = self.get_response('returnBalances')
assert isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnBalances")
@retry
def returnFeeInfo(self):
"""
        Returns exchange fee information
:return:
"""
try:
rep = self.get_response('returnFeeInfo')
assert isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnFeeInfo")
@retry
def returnCurrencies(self):
"""
        Return exchange currency information
        :return: dict:
"""
try:
rep = self.get_response('returnCurrencies')
assert isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnCurrencies")
@retry
def returnChartData(self, currencyPair, period, start=None, end=None):
"""
Return pair OHLC data
:param currencyPair: str: Desired pair str
:param period: int: Candle period. Must be in [300, 900, 1800, 7200, 14400, 86400]
:param start: str: UNIX timestamp to start from
:param end: str: UNIX timestamp to end returned data
:return: list: List containing desired asset data in "records" format
"""
try:
call = "returnChartData %s %s %s %s" % (str(currencyPair),
str(period),
str(start),
str(end))
rep = self.get_response(call)
if 'Invalid currency pair.' in rep:
try:
symbols = currencyPair.split('_')
pair = symbols[1] + '_' + symbols[0]
call = "returnChartData %s %s %s %s" % (str(pair),
str(period),
str(start),
str(end))
rep = json.loads(
self.pair_reciprocal(pd.DataFrame.from_records(self.get_response(call))).to_json(
orient='records'))
except Exception as e:
raise e
assert isinstance(rep, list), "returnChartData reply is not list"
assert int(rep[-1]['date']), "Bad returnChartData reply data"
assert float(rep[-1]['open']), "Bad returnChartData reply data"
assert float(rep[-1]['close']), "Bad returnChartData reply data"
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnChartData")
@retry
def returnTradeHistory(self, currencyPair='all', start=None, end=None):
try:
call = "returnTradeHistory %s %s %s" % (str(currencyPair),
str(start),
str(end))
rep = self.get_response(call)
assert isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnTradeHistory")
@retry
def returnDepositsWithdrawals(self, start=False, end=False):
try:
call = "returnDepositsWithdrawals %s %s" % (
str(start),
str(end)
)
rep = self.get_response(call)
assert isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnDepositsWithdrawals")
@retry
def sell(self, currencyPair, rate, amount, orderType=False):
try:
call = "sell %s %s %s %s" % (str(currencyPair),
str(rate),
str(amount),
str(orderType))
rep = self.get_response(call)
if 'Invalid currency pair.' in rep:
try:
symbols = currencyPair.split('_')
pair = symbols[1] + '_' + symbols[0]
call = "sell %s %s %s %s" % (str(pair),
str(rate),
str(amount),
str(orderType))
rep = self.get_response(call)
except Exception as e:
raise e
assert isinstance(rep, str) or isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.sell")
@retry
def buy(self, currencyPair, rate, amount, orderType=False):
try:
call = "buy %s %s %s %s" % (str(currencyPair),
str(rate),
str(amount),
str(orderType))
rep = self.get_response(call)
if 'Invalid currency pair.' in rep:
try:
symbols = currencyPair.split('_')
pair = symbols[1] + '_' + symbols[0]
call = "buy %s %s %s %s" % (str(pair),
str(rate),
str(amount),
str(orderType))
rep = self.get_response(call)
except Exception as e:
raise e
assert isinstance(rep, str) or isinstance(rep, dict)
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.buy")
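# Client-side usage sketch (assumes a FeedDaemon is already serving on the same
# address; the pair and timestamps below are placeholders):
#
#   feed = DataFeed(exchange='poloniex', addr='ipc:///tmp/feed.ipc', timeout=30)
#   ticker = feed.returnTicker()
#   candles = feed.returnChartData('USDT_BTC', period=300,
#                                  start=1483228800, end=1483315200)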
# Test datafeeds
class BacktestDataFeed(ExchangeConnection):
"""
Data feeder for backtesting with TradingEnvironment.
"""
# TODO WRITE TESTS
def __init__(self, tapi, period, pairs=[], balance={}, load_dir=None):
super().__init__()
self.tapi = tapi
self.ohlc_data = {}
self._balance = balance
self.data_length = 0
self.load_dir = load_dir
self.tax = {'makerFee': '0.00150000',
'nextTier': '600.00000000',
'takerFee': '0.00250000',
'thirtyDayVolume': '0.00000000'}
self.pairs = pairs
self.period = period
def returnBalances(self):
return self._balance
def set_tax(self, tax):
"""
{'makerFee': '0.00150000',
'nextTier': '600.00000000',
'takerFee': '0.00250000',
'thirtyDayVolume': '0.00000000'}
        :param tax: dict: fee fields to update (see the example above)
:return:
"""
self.tax.update(tax)
def returnFeeInfo(self):
return self.tax
def returnCurrencies(self):
if self.load_dir:
try:
with open(self.load_dir + '/currencies.json') as file:
return json.load(file)
except Exception as e:
Logger.error(BacktestDataFeed.returnCurrencies, str(e.__cause__) + str(e))
return self.tapi.returnCurrencies()
else:
return self.tapi.returnCurrencies()
def download_data(self, start=None, end=None):
# TODO WRITE TEST
self.ohlc_data = {}
self.data_length = None
index = pd.date_range(start=start,
end=end,
freq="%dT" % self.period).ceil("%dT" % self.period)
for pair in self.pairs:
ohlc_df = pd.DataFrame.from_records(
self.tapi.returnChartData(
pair,
period=self.period * 60,
start=start,
end=end
),
nrows=index.shape[0]
)
i = -1
last_close = ohlc_df.at[ohlc_df.index[i], 'close']
while not dec_con.create_decimal(last_close).is_finite():
i -= 1
last_close = ohlc_df.at[ohlc_df.index[i], 'close']
# Replace missing values with last close
fill_dict = {col: last_close for col in ['open', 'high', 'low', 'close']}
fill_dict.update({'volume': '0E-16'})
self.ohlc_data[pair] = ohlc_df.fillna(fill_dict).ffill()
for key in self.ohlc_data:
if not self.data_length or self.ohlc_data[key].shape[0] < self.data_length:
self.data_length = self.ohlc_data[key].shape[0]
for key in self.ohlc_data:
if self.ohlc_data[key].shape[0] != self.data_length:
# self.ohlc_data[key] = pd.DataFrame.from_records(
# self.tapi.returnChartData(key, period=self.period * 60,
# start=self.ohlc_data[key].date.iloc[-self.data_length],
# end=end
# ),
# nrows=index.shape[0]
# )
self.ohlc_data[key] = self.ohlc_data[key].iloc[:self.data_length]
self.ohlc_data[key].set_index('date', inplace=True, drop=False)
print("%d intervals, or %d days of data at %d minutes period downloaded." % (self.data_length, (self.data_length * self.period) /\
(24 * 60), self.period))
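    # Summary of the alignment done above: every pair is read onto the same
    # resampled date index, missing OHLC values are filled with the last finite
    # close (volume with 0) and forward-filled, and all frames are truncated to
    # the shortest series so that self.ohlc_data[pair].shape[0] equals
    # self.data_length for every pair.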
def save_data(self, dir=None):
"""
Save data to disk
        :param dir: str: directory relative to ./; e.g. './data/train'
:return:
"""
for item in self.ohlc_data:
self.ohlc_data[item].to_json(dir+'/'+str(item)+'_'+str(self.period)+'min.json', orient='records')
def load_data(self, dir):
"""
        Load data from disk.
        JSON-like data expected.
:param dir: str: directory relative to self.load_dir; eg: './self.load_dir/dir'
:return: None
"""
self.ohlc_data = {}
self.data_length = None
for key in self.pairs:
self.ohlc_data[key] = pd.read_json(self.load_dir + dir +'/'+str(key)+'_'+str(self.period)+'min.json', convert_dates=False,
orient='records', date_unit='s', keep_default_dates=False, dtype=False)
self.ohlc_data[key].set_index('date', inplace=True, drop=False)
if not self.data_length:
self.data_length = self.ohlc_data[key].shape[0]
else:
assert self.data_length == self.ohlc_data[key].shape[0]
def returnChartData(self, currencyPair, period, start=None, end=None):
try:
data = json.loads(self.ohlc_data[currencyPair].loc[start:end, :].to_json(orient='records'))
return data
except json.JSONDecodeError:
print("Bad exchange response.")
except AssertionError as e:
if "Invalid period" == e:
raise ExchangeError("%d invalid candle period" % period)
elif "Invalid pair" == e:
raise ExchangeError("Invalid currency pair.")
def reverse_data(self):
for df in self.ohlc_data:
self.ohlc_data.update({df:self.ohlc_data[df].reindex(index=self.ohlc_data[df].index[::-1])})
self.ohlc_data[df]['date'] = self.ohlc_data[df].index[::-1]
self.ohlc_data[df].index = self.ohlc_data[df].index[::-1]
self.ohlc_data[df] = self.ohlc_data[df].rename(columns={'close': 'open', 'open': 'close'})
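# Backtest usage sketch (assumes `live_feed` is any object implementing
# returnChartData()/returnCurrencies(), e.g. a DataFeed instance; the pairs,
# balance and timestamps are placeholders):
#
#   btest = BacktestDataFeed(live_feed, period=30, pairs=['USDT_BTC', 'USDT_ETH'],
#                            balance={'USDT': '1000.00'})
#   btest.download_data(start=1483228800, end=1490000000)
#   btest.save_data('./data/train')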
class PaperTradingDataFeed(ExchangeConnection):
"""
Data feeder for paper trading with TradingEnvironment.
"""
# TODO WRITE TESTS
def __init__(self, tapi, period, pairs=[], balance={}):
super().__init__()
self.tapi = tapi
self._balance = balance
self.pairs = pairs
self.period = period
def returnBalances(self):
return self._balance
def returnFeeInfo(self):
return {'makerFee': '0.00150000',
'nextTier': '600.00000000',
'takerFee': '0.00250000',
'thirtyDayVolume': '0.00000000'}
def returnTicker(self):
return self.tapi.returnTicker()
def returnCurrencies(self):
"""
        Return exchange currency information
        :return: dict:
"""
return self.tapi.returnCurrencies()
def returnChartData(self, currencyPair, period, start=None, end=None):
"""
Return pair OHLC data
:param currencyPair: str: Desired pair str
:param period: int: Candle period. Must be in [300, 900, 1800, 7200, 14400, 86400]
:param start: str: UNIX timestamp to start from
:param end: str: UNIX timestamp to end returned data
:return: list: List containing desired asset data in "records" format
"""
try:
return self.tapi.returnChartData(currencyPair, period, start=start, end=end)
except ExchangeError as error:
if 'Invalid currency pair.' == error.__str__():
try:
symbols = currencyPair.split('_')
pair = symbols[1] + '_' + symbols[0]
return json.loads(
self.pair_reciprocal(pd.DataFrame.from_records(self.tapi.returnChartData(pair, period,
start=start,
end=end
))).to_json(
orient='records'))
except Exception as e:
raise e
else:
raise error
# Live datafeeds
class PoloniexConnection(DataFeed):
def __init__(self, period, pairs=[], exchange='', addr='ipc:///tmp/feed.ipc', timeout=20):
"""
        :param period: int: Data period
        :param pairs: list: Pairs to trade
        :param exchange: str: FeedDaemon exchange to query
        :param addr: str: Client socket address
        :param timeout: int: Request timeout in seconds
"""
super().__init__(exchange, addr, timeout)
self.pairs = pairs
self.period = period
@DataFeed.retry
def returnChartData(self, currencyPair, period, start=None, end=None):
"""
Return pair OHLC data
:param currencyPair: str: Desired pair str
:param period: int: Candle period. Must be in [300, 900, 1800, 7200, 14400, 86400]
:param start: str: UNIX timestamp to start from
:param end: str: UNIX timestamp to end returned data
:return: list: List containing desired asset data in "records" format
"""
try:
call = "returnChartData %s %s %s %s" % (str(currencyPair),
str(period),
str(start),
str(end))
rep = self.get_response(call)
if 'Invalid currency pair.' in rep:
try:
symbols = currencyPair.split('_')
pair = symbols[1] + '_' + symbols[0]
call = "returnChartData %s %s %s %s" % (str(pair),
str(period),
str(start),
str(end))
rep = json.loads(
self.pair_reciprocal(
pd.DataFrame.from_records(
self.get_response(call)
)
).to_json(orient='records'))
except Exception as e:
raise e
assert isinstance(rep, list), "returnChartData reply is not list: %s" % str(rep)
assert int(rep[-1]['date']), "Bad returnChartData reply data"
assert float(rep[-1]['open']), "Bad returnChartData reply data"
assert float(rep[-1]['close']), "Bad returnChartData reply data"
return rep
except AssertionError:
raise UnexpectedResponseException("Unexpected response from DataFeed.returnChartData")
| mit |
lukeiwanski/tensorflow-opencl | tensorflow/examples/tutorials/input_fn/boston.py | 51 | 2709 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# .predict() returns an iterator; convert to a list and print predictions
predictions = list(itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
zorojean/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation of the predictions on new, non-corrupt data is
used to judge the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
tody411/ImageViewerFramework | ivf/batch/base_detail_separation.py | 1 | 9968 | # -*- coding: utf-8 -*-
## @package ivf.batch.base_detail_separation
#
# ivf.batch.base_detail_separation utility package.
# @author tody
# @date 2016/02/10
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
from ivf.batch.batch import DatasetBatch, CharacterBatch
from ivf.io_util.image import loadNormal, loadRGBA, loadRGB, saveNormal
from ivf.cv.image import to32F, setAlpha, luminance, alpha, rgb
from ivf.np.norm import normalizeVector
from ivf.core.shader.toon import ToonShader
from ivf.core.sfs.detail_layer import baseDetailSeparationGaussian, baseDetailSeparationBilateral,\
baseDetailSeprationDOG, baseDetailSeparationMedian
from ivf.plot.window import SubplotGrid, showMaximize
from ivf.core.sfs.bump_mapping import bumpNormal, bumpMapping
from ivf.cv.normal import normalToColor, normalizeImage
from ivf.core.sfs.amg_constraints import normalConstraints
from ivf.core.solver import amg_solver
from ivf.core.sfs import amg_constraints
from ivf.core.sfs.lumo import computeNz
from ivf.core.shader.lambert import LambertShader
from ivf.ui.image_view import ImageView
from ivf.ui.editor.parameter_editor import ParameterEditor
class BaseDetailSeprationBatch(DatasetBatch, CharacterBatch):
def __init__(self, parameters, view, name="BaseDetailSepration", dataset_name="3dmodel"):
super(BaseDetailSeprationBatch, self).__init__(name, dataset_name)
self._parameters = parameters
self._parameters["sigmaSpace"].valueChanged.connect(self._computeBaseDetalSeparation)
self._parameters["sigmaRange"].valueChanged.connect(self._computeBaseDetalSeparation)
self._parameters["bumpScale"].valueChanged.connect(self._computeInitialDetailNormal)
self._view = view
self._N_32F = None
self._A_8U = None
self._N0_b_32F = None
self._N0_d_32F = None
self._N_lumo = None
self._C0_32F = None
self._D = None
self._N_b = None
self._N_b_smooth = None
self._N_d = None
self._N_d_smooth = None
def _runImp(self):
normal_data = loadNormal(self._data_file)
if normal_data is None:
return
N0_32F, A_8U = normal_data
A_32F = to32F(A_8U)
L = normalizeVector(np.array([-0.2, 0.3, 0.7]))
# C0_32F = ToonShader().diffuseShading(L, N0_32F)
C0_32F = LambertShader().diffuseShading(L, N0_32F)
self._C0_32F = C0_32F
self._loadImage()
def _computeBaseDetalSeparation(self):
sigma_space, sigma_range = self._parameters["sigmaSpace"], self._parameters["sigmaRange"]
C0_32F = self._C0_32F
I_32F = luminance(C0_32F)
B_b, D_b = baseDetailSeparationBilateral(I_32F, sigma_space=sigma_space.value(), sigma_range=sigma_range.value())
self._D = D_b
self._N_b = bumpNormal(B_b, scale=1.0, sigma=1.0)
self._N_d = bumpNormal(D_b, scale=1.0, sigma=1.0)
self._view.render(D_b)
def _computeInitialDetailNormal(self):
bump_scale = self._parameters["bumpScale"].value()
self._N_b[:, :, :2] *= bump_scale
self._N_b = normalizeImage(self._N_b, th=1.0)
self._N_d[:, :, :2] *= bump_scale
self._N_d = normalizeImage(self._N_d, th=1.0)
self._view.render(normalToColor(self._N_b))
def _computeDetailNormal(self, N0_32F):
h, w = N0_32F.shape[:2]
W_32F = np.zeros((h, w))
# sigma_d = 2.0 * np.max(N0_32F[:, :, 2])
# W_32F = 1.0 - np.exp( - (N0_32F[:, :, 2] ** 2) / (sigma_d ** 2))
W_32F = 1.0 - N0_32F[:, :, 2]
W_32F *= 1.0 / np.max(W_32F)
W_32F = W_32F ** 1.5
A_c, b_c = amg_constraints.normalConstraints(W_32F, N0_32F)
A_L = amg_constraints.laplacianMatrix((h, w))
lambda_d = 2.0
A = A_c + lambda_d * A_L
b = b_c
N_32F = amg_solver.solve(A, b).reshape(h, w, 3)
N_32F = computeNz(N_32F.reshape(-1, 3)).reshape(h, w, 3)
N_32F = normalizeImage(N_32F)
return N_32F
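    # In _computeDetailNormal() above: W_32F is large where the bump normal tilts
    # away from the viewer (small Nz), so the constraint term A_c follows the
    # detail normals there, while the Laplacian term A_L (weighted by
    # lambda_d = 2.0) smooths the remaining regions; computeNz() then re-projects
    # the solved field onto unit-length normals.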
def computeDetailNormal(self):
self._N_b_smooth = self._computeDetailNormal(self._N_b)
self._N_d_smooth = self._computeDetailNormal(self._N_d)
def _computeLumoNormal(self):
A_8U = self._A_8U
if A_8U is None:
return
h, w = A_8U.shape[:2]
A_c, b_c = amg_constraints.silhouetteConstraints(A_8U)
A_L = amg_constraints.laplacianMatrix((h, w))
A = 3.0 * A_c + A_L
b = 3.0 * b_c
N_32F = amg_solver.solve(A, b).reshape(h, w, 3)
N_32F = computeNz(N_32F.reshape(-1, 3)).reshape(h, w, 3)
N_32F = normalizeImage(N_32F)
self._N_lumo = np.array(N_32F)
def computeInitialNormal(self):
if self._N_lumo.shape != self._N_b_smooth.shape:
return
self._N0_b_32F = normalizeImage(bumpMapping(np.array(self._N_lumo), self._N_b_smooth))
self._N0_d_32F = normalizeImage(bumpMapping(np.array(self._N_lumo), self._N_d_smooth))
def _loadImage(self):
C0_32F = self._C0_32F
I_32F = luminance(C0_32F)
self._view.render(I_32F)
return
# B_g, D_g = baseDetailSeparationGaussian(I_32F, sigma=10.0)
# B_b, D_b = baseDetailSeparationBilateral(I_32F, sigma_space=5.0, sigma_range=0.3)
# B_dog, D_dog = baseDetailSeprationDOG(I_32F, sigma=2.0)
# B_med, D_med = baseDetailSeparationMedian(I_32F, ksize=21)
#
# separations = [#["Gaussian", B_g, D_g],
# #["DOG", B_dog, D_dog],
# #["Median", B_med, D_med],
# ["Bilateral", B_b, D_b]
# ]
# for separation in separations:
# N0_32F = bumpNormal(separation[-1], scale=50.0, sigma=3.0)
# separation.append(normalToColor(N0_32F))
#
# th = 0.1
# h, w = N0_32F.shape[:2]
# W_32F = np.zeros((h, w))
#
# W_32F = 1.0 - N0_32F[:, :, 2]
# W_32F *= 1.0 / np.max(W_32F)
#
# W_32F = W_32F ** 1.5
# #W_32F[W_32F < th] = 0.0
# #W_32F[W_32F > th] = 1.0
#
# A_c, b_c = amg_constraints.normalConstraints(W_32F, N0_32F)
#
# A_L = amg_constraints.laplacianMatrix((h, w))
# A = 3.0 * A_c + A_L
# b = 3.0 * b_c
#
# N_32F = amg_solver.solve(A, b).reshape(h, w, 3)
# N_32F = computeNz(N_32F.reshape(-1, 3)).reshape(h, w, 3)
# N_32F = normalizeImage(N_32F)
# separation.append(normalToColor(N_32F, self._A_8U))
#
# self._N_32F = N_32F
# C0_32F = LambertShader().diffuseShading(L, N0_32F)
# fig, axes = plt.subplots(figsize=(12, 8))
# font_size = 15
# fig.subplots_adjust(left=0.05, right=0.95, top=0.9, hspace=0.12, wspace=0.05)
# fig.suptitle(self.name(), fontsize=font_size)
#
# num_rows = 1
# num_cols = len(separations) + 1
# plot_grid = SubplotGrid(num_rows, num_cols)
#
# for separation in separations:
# plot_grid.showImage(separation[-2], separation[0])
# plot_grid.showImage(separation[-1], separation[0])
#
# showMaximize()
def _runCharacterImp(self):
# if self._character_name != "XMen":
# return
self._runLayer(self.fullLayerFile())
# for layer_file in self.layerFiles():
# self._runLayer(layer_file)
def _runLayer(self, layer_file):
if layer_file is None:
return
C0_8U = loadRGBA(layer_file)
if C0_8U is None:
C0_8U = loadRGB(layer_file)
if C0_8U is None:
return
h, w = C0_8U.shape[:2]
w_low = 1024
h_low = w_low * h / w
# C0_8U = cv2.resize(C0_8U, (w_low, h_low))
A_8U = alpha(C0_8U)
self._A_8U = A_8U
C0_32F = to32F(rgb(C0_8U))
if A_8U is not None:
C0_32F[A_8U < 0.9 * np.max(A_8U), :] = np.array([0, 0, 0])
self._C0_32F = C0_32F
self._loadImage()
self._computeBaseDetalSeparation()
self._computeInitialDetailNormal()
self.computeDetailNormal()
self._computeLumoNormal()
self.computeInitialNormal()
# plt.savefig(self.characterResultFile("BumpNormal.png"))
if self._N_b_smooth is not None:
self.cleanCharacterResultDir()
saveNormal(self.characterResultFile("N_b.png"), self._N_b, A_8U)
saveNormal(self.characterResultFile("N_b_smooth.png"), self._N_b_smooth, A_8U)
saveNormal(self.characterResultFile("N_d.png"), self._N_d, A_8U)
saveNormal(self.characterResultFile("N_d_smooth.png"), self._N_d_smooth, A_8U)
saveNormal(self.characterResultFile("N_lumo.png"), self._N_lumo, A_8U)
saveNormal(self.characterResultFile("N0_b.png"), self._N0_b_32F, A_8U)
saveNormal(self.characterResultFile("N0_d.png"), self._N0_d_32F, A_8U)
def finishCharacter(self):
if self._character_name != "":
pass
self.runCharacter()
if __name__ == '__main__':
app = QApplication(sys.argv)
view = ImageView()
view.showMaximized()
editor = ParameterEditor()
editor.addFloatParameter("sigmaSpace", min_val=1.0, max_val=30.0, default_val=5.0)
editor.addFloatParameter("sigmaRange", min_val=0.0, max_val=1.0, default_val=0.3)
editor.addFloatParameter("bumpScale", min_val=10.0, max_val=50.0, default_val=20.0)
batch = BaseDetailSeprationBatch(editor.parameters(), view)
editor.addButton("Compute Silhouette Normal", cmd_func=batch.computeInitialNormal)
editor.addButton("Compute Detail Normal", cmd_func=batch.computeDetailNormal)
editor.show()
#view.setReturnCallback(batch.finishCharacter)
batch.runCharacters()
sys.exit(app.exec_())
| mit |
florian-f/sklearn | sklearn/metrics/pairwise.py | 3 | 28439 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD Style.
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import array2d
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
        An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = atleast2d_or_csr(X, dtype=np.float)
else:
X = atleast2d_or_csr(X, dtype=np.float)
Y = atleast2d_or_csr(Y, dtype=np.float)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
# TODO: a faster Cython implementation would do the clipping of negative
# values in a single pass over the output matrix.
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
def manhattan_distances(X, Y=None, sum_over_features=True):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
if issparse(X) or issparse(Y):
raise ValueError("manhattan_distance does not support sparse"
" matrices.")
X, Y = check_pairwise_arrays(X, Y)
D = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((-1, X.shape[1]))
return D
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-γ ||x-y||²)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array_like
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -∑ᵢ [(xᵢ - yᵢ)² / (xᵢ + yᵢ)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
### we don't use check_pairwise to preserve float32.
if Y is None:
# optimize this case!
X = array2d(X)
if X.dtype != np.float32:
            X = X.astype(np.float)
Y = X
if (X < 0).any():
raise ValueError("X contains negative values.")
else:
X = array2d(X)
Y = array2d(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if X.dtype != np.float32 or Y.dtype != np.float32:
# if not both are 32bit float, convert to 64bit float
X = X.astype(np.float)
Y = Y.astype(np.float)
if (X < 0).any():
raise ValueError("X contains negative values.")
if (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-γ ∑ᵢ [(xᵢ - yᵢ)² / (xᵢ + yᵢ)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
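# Relationship between the two chi-squared kernels above (comment-only sketch):
#
#   K_add = additive_chi2_kernel(X, Y)   # values <= 0, conditionally p.d.
#   K_exp = np.exp(gamma * K_add)        # identical to chi2_kernel(X, Y, gamma)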
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'cityblock': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to those
metrics listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
Valid values for metric are:
- from scikit-learn: ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note in the case of 'euclidean' and 'cityblock' (which are valid
scipy.spatial.distance metrics), the values will use the scikit-learn
implementation, which is faster and has support for sparse matrices.
For a verbose description of the metrics from scikit-learn, see the
__doc__ of the sklearn.pairwise.distance_metrics function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
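# Usage sketch for pairwise_distances (the `my_metric` callable below is a
# hypothetical example, not part of this module):
#
#   def my_metric(x, y):
#       return np.sum(np.abs(x - y) ** 0.5)
#
#   D = pairwise_distances(X, metric=my_metric)               # callable branch
#   D = pairwise_distances(X, metric='cosine')                # scipy branch
#   D = pairwise_distances(X, metric='euclidean', n_jobs=2)   # built-in, parallel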
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
| bsd-3-clause |
altairpearl/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
apevec/RMS | RMS/Routines/Image.py | 1 | 20077 | """ Image processing routines. """
from __future__ import print_function, division, absolute_import
import os
import math
import numpy as np
import scipy.misc
# Check which imread funtion to use
try:
imread = scipy.misc.imread
imsave = scipy.misc.imsave
USING_SCIPY_IMREAD = True
except AttributeError:
import imageio
imread = imageio.imread
imsave = imageio.imwrite
USING_SCIPY_IMREAD = False
from RMS.Decorators import memoizeSingle
# Cython init
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
import RMS.Routines.MorphCy as morph
from RMS.Routines.BinImageCy import binImage as binImageCy
def loadImage(img_path, flatten=-1):
""" Load the given image. Handle loading it using different libraries.
Arguments:
img_path: [str] Path to the image.
Keyword arguments:
flatten: [int] Convert color image to grayscale if -1. -1 by default.
"""
if USING_SCIPY_IMREAD:
img = imread(img_path, flatten)
else:
img = imread(img_path, as_gray=bool(flatten))
return img
def saveImage(img_path, img):
""" Save image to disk.
Arguments:
img_path: [str] Image path.
img: [ndarray] Image as numpy array.
"""
imsave(img_path, img)
def binImage(img, bin_factor, method='avg'):
""" Bin the given image. The binning has to be a factor of 2, e.g. 2, 4, 8, etc.
This is just a wrapper function for a cythonized function that does the binning.
Arguments:
img: [ndarray] Numpy array representing an image.
bin_factor: [int] The binning factor. Has to be a power of 2 (e.g. 2, 4, 8).
Keyword arguments:
method: [str] Binning method. 'avg' by default.
- 'sum' will sum all values in the binning window and assign it to the new pixel.
- 'avg' will take the average.
Return:
out_img: [ndarray] Binned image.
"""
input_type = img.dtype
# Make sure the input image is of the correct type
if img.dtype != np.uint16:
img = img.astype(np.uint16)
# Perform the binning
img = binImageCy(img, bin_factor, method=method)
# Convert the image back to the input type
img = img.astype(input_type)
return img
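# Usage sketch (the input frame is a placeholder): binning a 480x640 frame by a
# factor of 2 with the 'avg' method halves each dimension.
# >>> frame = np.zeros((480, 640), dtype=np.uint8)
# >>> binImage(frame, 2, method='avg').shape
# (240, 320)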
def thresholdImg(img, avepixel, stdpixel, k1, j1, ff=False, mask=None, mask_ave_bright=True):
""" Threshold the image with given parameters.
Arguments:
img: [2D ndarray]
avepixel: [2D ndarray]
stdpixel: [2D ndarray]
k1: [float] relative thresholding factor (how many standard deviations above mean the maxpixel image
should be)
j1: [float] absolute thresholding factor (how many minimum absolute levels above mean the maxpixel
image should be)
Keyword arguments:
ff: [bool] If True, it indicates that an FF file is being thresholded.
mask: [ndarray] Mask image. None by default.
mask_ave_bright: [bool] Mask out regions that are 5 sigma brighter in avepixel than the mean.
This gets rid of very bright stars, saturating regions, static bright parts, etc.
Return:
[ndarray] thresholded 2D image
"""
# If the FF file is used, then values in max will always be larger than values in average
if ff:
img_avg_sub = img - avepixel
else:
# Subtract the average from the input image, making sure there are no values below 0 which would wrap around
img_avg_sub = applyDark(img, avepixel)
# Compute the thresholded image
img_thresh = img_avg_sub > (k1 * stdpixel + j1)
# Mask out regions that are very bright in avepixel
if mask_ave_bright:
# Compute the average saturation mask and mask out everything that's saturating in avepixel
ave_saturation_mask = avepixel >= np.min([np.mean(avepixel) + 5*np.std(avepixel), \
np.iinfo(avepixel.dtype).max])
# Dilate the mask 2 times
input_type = ave_saturation_mask.dtype
ave_saturation_mask = morph.morphApply(ave_saturation_mask.astype(np.uint8), [5, 5]).astype(input_type)
img_thresh = img_thresh & ~ave_saturation_mask
# If the mask was given, set all areas of the thresholded image covered by the mask to false
if mask is not None:
if img_thresh.shape == mask.img.shape:
img_thresh[mask.img == 0] = False
# The thresholded image is always 8 bit
return img_thresh.astype(np.uint8)
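# Usage sketch (img, avepixel and stdpixel are placeholder arrays of equal shape):
# a pixel is flagged when img - avepixel > k1*stdpixel + j1.
# >>> detection_mask = thresholdImg(img, avepixel, stdpixel, k1=1.5, j1=9)
# >>> detection_mask.dtype  # always uint8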
@memoizeSingle
def thresholdFF(ff, k1, j1, mask=None, mask_ave_bright=False):
""" Threshold the FF with given parameters.
Arguments:
ff: [FF object] input FF image object on which the thresholding will be applied
k1: [float] relative thresholding factor (how many standard deviations above mean the maxpixel image
should be)
j1: [float] absolute thresholding factor (how many minimum absolute levels above mean the maxpixel
image should be)
Keyword arguments:
mask: [ndarray] Mask image. None by default.
mask_ave_bright: [bool] Mask out regions that are 5 sigma brighter in avepixel than the mean.
This gets rid of very bright stars, saturating regions, static bright parts, etc.
Return:
[ndarray] thresholded 2D image
"""
return thresholdImg(ff.maxpixel, ff.avepixel, ff.stdpixel, k1, j1, ff=True, mask=mask, \
mask_ave_bright=mask_ave_bright)
@np.vectorize
def gammaCorrection(intensity, gamma, bp=0, wp=255):
""" Correct the given intensity for gamma.
Arguments:
intensity: [int] Pixel intensity
gamma: [float] Gamma.
Keyword arguments:
bp: [int] Black point.
wp: [int] White point.
Return:
[float] Gamma corrected image intensity.
"""
if intensity < 0:
intensity = 0
x = (intensity - bp)/(wp - bp)
if x > 0:
# Compute the corrected intensity
return bp + (wp - bp)*(x**(1.0/gamma))
else:
return bp
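# Worked example (default 8-bit black/white points assumed): with gamma=2.2 an
# input of 128 maps to 255*(128/255)**(1/2.2), roughly 186.
# >>> gammaCorrection(128, 2.2)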
def applyBrightnessAndContrast(img, brightness, contrast):
""" Applies brightness and contrast corrections to the image.
Arguments:
img: [2D ndarray] Image array.
brightness: [int] A number in the range -255 to 255.
contrast: [float] A number in the range -255 to 255.
Return:
img: [2D ndarray] Image array with the brightness applied.
"""
contrast = float(contrast)
# Compute the contrast factor
f = (259.0*(contrast + 255.0))/(255*(259 - contrast))
img_type = img.dtype
# Convert image to float
img = img.astype(np.float)
# Apply brightness
img = img + brightness
# Apply contrast
img = f*(img - 128.0) + 128.0
# Clip the values to 0-255 range
img = np.clip(img, 0, 255)
# Preserve image type
img = img.astype(img_type)
return img
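# Usage sketch (img is a placeholder 8-bit array): a contrast of 40 gives a factor
# f = 259*(40 + 255)/(255*(259 - 40)) ~= 1.37, applied about the mid level 128
# after the +20 brightness offset.
# >>> out = applyBrightnessAndContrast(img, brightness=20, contrast=40)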
def adjustLevels(img_array, minv, gamma, maxv, nbits=None, scaleto8bits=False):
""" Adjusts levels on image with given parameters.
Arguments:
img_array: [ndarray] Input image array.
minv: [int] Minimum level.
gamma: [float] gamma value
maxv: [int] Maximum level.
Keyword arguments:
nbits: [int] Image bit depth.
scaleto8bits: [bool] If True, the maximum value will be scaled to 255 and the image will be converted
to 8 bits.
Return:
[ndarray] Image with adjusted levels.
"""
if nbits is None:
# Get the bit depth from the image type
nbits = 8*img_array.itemsize
input_type = img_array.dtype
# Calculate maximum image level
max_lvl = 2**nbits - 1.0
# Limit the maximum level
if maxv > max_lvl:
maxv = max_lvl
# Check that the image adjustment values are in fact given
if (minv is None) or (gamma is None) or (maxv is None):
return img_array
minv = minv/max_lvl
maxv = maxv/max_lvl
interval = maxv - minv
invgamma = 1.0/gamma
# Make sure the interval is at least 10 levels of difference
if interval*max_lvl < 10:
minv *= 0.9
maxv *= 1.1
interval = maxv - minv
# Make sure the minimum and maximum levels are in the correct range
if minv < 0:
minv = 0
if maxv*max_lvl > max_lvl:
maxv = 1.0
img_array = img_array.astype(np.float64)
# Reduce array to 0-1 values
img_array = np.divide(img_array, max_lvl)
# Calculate new levels
img_array = np.divide((img_array - minv), interval)
# Cut values lower than 0
img_array[img_array < 0] = 0
img_array = np.power(img_array, invgamma)
img_array = np.multiply(img_array, max_lvl)
# Convert back to 0-maxval values
img_array = np.clip(img_array, 0, max_lvl)
# Scale the image to 8 bits so the maximum value is set to 255
if scaleto8bits:
img_array *= 255.0/np.max(img_array)
img_array = img_array.astype(np.uint8)
else:
# Convert the image back to input type
img_array = img_array.astype(input_type)
return img_array
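# Usage sketch (img is a placeholder 8-bit array): stretch the levels between 10
# and 240 with a mild gamma of 1.2, keeping the original bit depth.
# >>> stretched = adjustLevels(img, 10, 1.2, 240)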
class FlatStruct(object):
def __init__(self, flat_img, dark=None):
""" Structure containing the flat field.
Arguments:
flat_img: [ndarray] Flat field.
"""
# Convert the flat to float64
self.flat_img = flat_img.astype(np.float64)
# Store the original flat
self.flat_img_raw = np.copy(self.flat_img)
# Apply the dark, if given
self.applyDark(dark)
# Compute the flat median
self.computeAverage()
# Fix values close to 0
self.fixValues()
def applyDark(self, dark):
""" Apply a dark to the flat. """
# Apply a dark frame to the flat, if given
if dark is not None:
self.flat_img = applyDark(self.flat_img_raw, dark)
self.dark_applied = True
else:
self.flat_img = np.copy(self.flat_img_raw)
self.dark_applied = False
# Compute flat median
self.computeAverage()
# Fix values close to 0
self.fixValues()
def computeAverage(self):
""" Compute the reference level. """
# Bin the flat by a factor of 4 using the average method
flat_binned = binImage(self.flat_img, 4, method='avg')
# Take the maximum average level of pixels that are in a square of 1/4*height from the centre
radius = flat_binned.shape[0]//4
img_h_half = flat_binned.shape[0]//2
img_w_half = flat_binned.shape[1]//2
self.flat_avg = np.max(flat_binned[img_h_half-radius:img_h_half+radius, \
img_w_half-radius:img_w_half+radius])
# Make sure the self.flat_avg value is relatively high
if self.flat_avg < 1:
self.flat_avg = 1
def fixValues(self):
""" Handle values close to 0 on flats. """
# Make sure there are no values close to 0, as images are divided by flats
self.flat_img[(self.flat_img < self.flat_avg/10) | (self.flat_img < 10)] = self.flat_avg
def binFlat(self, binning_factor, binning_method):
""" Bin the flat. """
# Bin the processed flat
self.flat_img = binImage(self.flat_img, binning_factor, binning_method)
# Bin the raw flat image
self.flat_img_raw = binImage(self.flat_img_raw, binning_factor, binning_method)
def loadFlat(dir_path, file_name, dtype=None, byteswap=False, dark=None):
""" Load the flat field image.
Arguments:
dir_path: [str] Directory where the flat image is.
file_name: [str] Name of the flat field file.
Keyword arguments:
dtype: [dtype] The given data type will be forced if given (e.g. np.uint16).
byteswap: [bool] Byteswap the flat image. False by default.
Return:
flat_struct: [Flat struct] Structure containing the flat field info.
"""
# Load the flat image
flat_img = loadImage(os.path.join(dir_path, file_name), -1)
# Change the file type if given
if dtype is not None:
flat_img = flat_img.astype(dtype)
# If the flat isn't an 8 bit integer, convert it to uint16
elif flat_img.dtype != np.uint8:
flat_img = flat_img.astype(np.uint16)
if byteswap:
flat_img = flat_img.byteswap()
# Init a new Flat structure
flat_struct = FlatStruct(flat_img, dark=dark)
return flat_struct
def applyFlat(img, flat_struct):
""" Apply a flat field to the image.
Arguments:
img: [ndarray] Image to flat field.
flat_struct: [Flat struct] Structure containing the flat field.
Return:
[ndarray] Flat corrected image.
"""
# Check that the input image and the flat have the same dimensions, otherwise do not apply it
if img.shape != flat_struct.flat_img.shape:
return img
input_type = img.dtype
# Apply the flat
img = flat_struct.flat_avg*img.astype(np.float64)/flat_struct.flat_img
# Limit the image values to image type range
dtype_info = np.iinfo(input_type)
img = np.clip(img, dtype_info.min, dtype_info.max)
# Make sure the output array is the same as the input type
img = img.astype(input_type)
return img
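# Usage sketch (directory, file name and frame are placeholders): load a flat once
# and divide each frame by it, rescaled by the flat's reference level.
# >>> flat_struct = loadFlat('/path/to/station', 'flat.bmp')
# >>> corrected = applyFlat(frame, flat_struct)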
def loadDark(dir_path, file_name, dtype=None, byteswap=False):
""" Load the dark frame.
Arguments:
dir_path: [str] Path to the directory which contains the dark frame.
file_name: [str] Name of the dark frame file.
Keyword arguments:
dtype: [dtype] The given data type will be forced if given (e.g. np.uint16).
byteswap: [bool] Byteswap the dark. False by default.
Return:
dark: [ndarray] Dark frame.
"""
try:
# Load the dark
dark = loadImage(os.path.join(dir_path, file_name), -1)
except OSError as e:
print('Dark could not be loaded:', e)
return None
# Change the file type if given
if dtype is not None:
dark = dark.astype(dtype)
# If the dark isn't an 8 bit integer, convert it to uint16
if dark.dtype != np.uint8:
dark = dark.astype(np.uint16)
if byteswap:
dark = dark.byteswap()
return dark
def applyDark(img, dark_img):
""" Apply the dark frame to an image.
Arguments:
img: [ndarray] Input image.
dark_img: [ndarray] Dark frame.
"""
# Check that the image sizes are the same
if img.shape != dark_img.shape:
return img
# Save input type
input_type = img.dtype
# Convert the image to integer (with negative values)
img = img.astype(np.int64)
# Subtract dark
img -= dark_img.astype(np.int64)
# Make sure there aren't any values smaller than 0
img[img < 0] = 0
# Convert the image back to the input type
img = img.astype(input_type)
return img
def deinterlaceOdd(img):
""" Deinterlaces the numpy array image by duplicating the odd frame.
"""
# Deepcopy img to new array
deinterlaced_image = np.copy(img)
deinterlaced_image[1::2, :] = deinterlaced_image[:-1:2, :]
# Move the image one row up
deinterlaced_image[:-1, :] = deinterlaced_image[1:, :]
deinterlaced_image[-1, :] = 0
return deinterlaced_image
def deinterlaceEven(img):
""" Deinterlaces the numpy array image by duplicating the even frame.
"""
# Deepcopy img to new array
deinterlaced_image = np.copy(img)
deinterlaced_image[:-1:2, :] = deinterlaced_image[1::2, :]
return deinterlaced_image
def blendLighten(arr1, arr2):
""" Blends two image array with lighen method (only takes the lighter pixel on each spot).
"""
# Store input type
input_type = arr1.dtype
arr1 = arr1.astype(np.int64)
temp = arr1 - arr2
temp[temp > 0] = 0
new_arr = arr1 - temp
new_arr = new_arr.astype(input_type)
return new_arr
def deinterlaceBlend(img):
""" Deinterlaces the image by making an odd and even frame, then blends them by lighten method.
"""
img_odd_d = deinterlaceOdd(img)
img_even = deinterlaceEven(img)
proc_img = blendLighten(img_odd_d, img_even)
return proc_img
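# Usage sketch (frame is a placeholder interlaced image): build the odd and even
# fields and keep the brighter pixel of the two.
# >>> progressive = deinterlaceBlend(frame)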
def fillCircle(photom_mask, x_cent, y_cent, radius):
y_min = math.floor(y_cent - 1.41*radius)
y_max = math.ceil(y_cent + 1.41*radius)
if y_min < 0: y_min = 0
if y_max > photom_mask.shape[0]: y_max = photom_mask.shape[0]
x_min = math.floor(x_cent - 1.41*radius)
x_max = math.ceil(x_cent + 1.41*radius)
if x_min < 0: x_min = 0
if x_max > photom_mask.shape[1]: x_max = photom_mask.shape[1]
for y in range(y_min, y_max):
for x in range(x_min, x_max):
if ((x - x_cent)**2 + (y - y_cent)**2) <= radius**2:
photom_mask[y, x] = 1
return photom_mask
def thickLine(img_h, img_w, x_cent, y_cent, length, rotation, radius):
""" Given the image size, return the mask where indices which are inside a thick rounded line are 1s, and
the rest are 0s. The Bresenham algorithm is used to compute line indices.
Arguments:
img_h: [int] Image height (px).
img_w: [int] Image width (px).
x_cent: [float] X centroid.
y_cent: [float] Y centroid.
length: [float] Length of the line segment (px).
rotation: [float] Rotation of the line (deg).
radius: [float] Aperture radius (px).
Return:
photom_mask: [ndarray] Photometric mask.
"""
# Init the photom_mask array
photom_mask = np.zeros((img_h, img_w), dtype=np.uint8)
rotation = np.radians(rotation)
# Compute the bounding box
x0 = math.floor(x_cent - np.cos(rotation)*length/2.0)
y0 = math.floor(y_cent - np.sin(rotation)*length/2.0)
y1 = math.ceil(y_cent + np.sin(rotation)*length/2.0)
x1 = math.ceil(x_cent + np.cos(rotation)*length/2.0)
# Init the photom_mask array
photom_mask = np.zeros((img_h, img_w))
dx = abs(x1 - x0)
dy = abs(y1 - y0)
x, y = x0, y0
sx = -1 if x0 > x1 else 1
sy = -1 if y0 > y1 else 1
if dx > dy:
err = dx / 2.0
while x != x1:
photom_mask = fillCircle(photom_mask, int(x), int(y), radius)
err -= dy
if err < 0:
y += sy
err += dx
x += sx
else:
err = dy / 2.0
while y != y1:
photom_mask = fillCircle(photom_mask, int(x), int(y), radius)
err -= dx
if err < 0:
x += sx
err += dy
y += sy
photom_mask = fillCircle(photom_mask, int(x), int(y), radius)
return photom_mask
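# Usage sketch: a 25 px long, 3 px radius rounded line rotated 45 degrees, centred
# at (100, 80) inside a 200x200 photometric mask.
# >>> mask = thickLine(200, 200, x_cent=100, y_cent=80, length=25, rotation=45, radius=3)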
if __name__ == "__main__":
import time
import matplotlib.pyplot as plt
from RMS.Formats import FFfile
import RMS.ConfigReader as cr
# Load config file
config = cr.parse(".config")
# Generate image data
img_data = np.zeros(shape=(256, 256))
for i in range(256):
img_data[:, i] += i
plt.imshow(img_data, cmap='gray')
plt.show()
# Adjust levels
img_data = adjustLevels(img_data, 100, 1.2, 240)
plt.imshow(img_data, cmap='gray')
plt.show()
#### Apply the flat
# Load an FF file
dir_path = "/home/dvida/Dropbox/Apps/Elginfield RPi RMS data/ArchivedFiles/CA0001_20171018_230520_894458_detected"
file_name = "FF_CA0001_20171019_092744_161_1118976.fits"
ff = FFfile.read(dir_path, file_name)
# Load the flat
flat_struct = loadFlat(os.getcwd(), config.flat_file)
t1 = time.clock()
# Apply the flat
img = applyFlat(ff.maxpixel, flat_struct)
print('Flat time:', time.clock() - t1)
plt.imshow(img, cmap='gray', vmin=0, vmax=255)
plt.show()
### TEST THICK LINE
x_cent = 20.1
y_cent = 20
rotation = 90
length = 0
radius = 2
indices = thickLine(200, 200, x_cent, y_cent, length, rotation, radius)
plt.imshow(indices)
plt.show() | gpl-3.0 |
jstitch/git_history_visualizer | git_history_test_git.py | 2 | 6352 |
# coding: utf-8
# In[1]:
# %load https://gist.githubusercontent.com/kidpixo/2ec078d09834b5aa7869/raw/c8812811211dc7cd62f5b530b51b0104f39263ff/ipython%20inizialization
import pandas as pd
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[2]:
commits_list = get_ipython().getoutput(u'git --no-pager log --reverse --oneline')
commits = []
for i in commits_list:
sha1 = i.split(' ')[0]
print 'Commit SHA-1 value:',sha1
commits.append(sha1)
# In[41]:
import subprocess
path = '/Users/damo_ma/Downloads/github_rep/git_history_visualizer'
p = subprocess.Popen(['git -C '+path+' --no-pager log --reverse --oneline'], stdout=subprocess.PIPE, shell=True)
for line in iter(p.stdout.readline,''):
print line.rstrip()
# In[3]:
all_files = get_ipython().getoutput(u"git --no-pager log --reverse --name-only --oneline --pretty='format:' | sed '/^$/d' | sort | uniq")
# ### Legend
#
# git status
#
# - `A` : file **A**dded
# - `D` : file **D**eleted
# - `M` : file **M**odified
# - `S` : file is **S**tatic (nothing happen)
# - `N` : file is **N**on existent
#
# See the official [Git - git-log Documentation](http://git-scm.com/docs/git-log) :
#
# --diff-filter=[(A|C|D|M|R|T|U|X|B)…[*]]
#
# Select only files that are Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R), have their type (i.e. regular file, symlink, submodule, …) changed (T), are Unmerged (U), are Unknown (X), or have had their pairing Broken (B). Any combination of the filter characters (including none) can be used. When * (All-or-none) is added to the combination, all paths are selected if there is any file that matches other criteria in the comparison; if there is no file that matches other criteria, nothing is selected.
#
#
# In[4]:
all_filenames = pd.DataFrame(pd.DataFrame(list(all_files)),columns=commits, index=all_files)
all_commits = get_ipython().getoutput(u"git --no-pager log --reverse --name-status --oneline --pretty='format:COMMIT %h %s' | tr '\\t' ' ' | sed -e '/^$/d'")
def_states = {
'A' : 0,
'M' : 32,
'S' : 64, # custom value, Static
'D' : 128,
'N' : 128, # custom value, Non existent
}
def_states_explain = {
'A' : 'Added',
'D' : 'Deleted',
'M' : 'Modified',
'S' : 'Static',
'N' : 'Non existent'
}
# fill NaN
all_filenames.fillna('N', inplace=True)
actual_commit = 0
# previous_commit = 0
for i in all_commits:
# set the commit number
if i[0] == 'C':
value = i.split(' ')[1]
# starting at the second commit, see which files exist in the previous commit
if actual_commit != int(all_filenames.columns[0]):
previous_commit = actual_commit
actual_commit = value
# assign 'S' to files that were present (not 'N' or 'D') in the previous commit
if previous_commit != 0:
all_filenames[actual_commit][
(all_filenames[previous_commit] != 'N') & (all_filenames[previous_commit] != 'D')] = 'S'
# all_filenames[previous_commit][all_filenames[actual_commit] == 'D'] = 'D'
# all_filenames[actual_commit][all_filenames[actual_commit] == 'D'] = 'N'
# print previous_commit,'>',actual_commit
else:
state,value = i.split(' ')
# print ' '*4,'-',state,value
all_filenames.ix[value,actual_commit] = state
# In[5]:
all_commits
# In[6]:
all_filenames
# In[7]:
def_states = {
'A' : 120,
'M' : 180,
'S' : 255, # custom value, Static
'D' : 240,
'N' : 128, # custom value, Non existent
}
history = all_filenames.applymap(lambda x: def_states[x]).values.copy()
# In[8]:
h = history.astype('float')
h[history == 128] = np.nan
# In[14]:
fig = plt.figure(figsize=[10,12])
ax = plt.subplot(111)
for i in range(len(all_files)):
x = range(len(commits))
y = [i for kk in x]
ax.scatter(x, y, s = 500, c=h[i,:], alpha=1, marker='o',linewidths = 3 , cmap = plt.cm.spectral,vmin = 0, vmax = 255)
ax.plot(x, y, lw = 3, c='k', zorder=0)
ax.set_xticks(range(history.shape[1]))
ax.set_xticklabels(all_filenames.columns,rotation=90)
ax.set_xlabel('commits sha-1 (time arrow to the right ->)')
ax.set_xlim([-.5,len(commits)-0.5])
ax.set_ylabel('file names')
ax.set_yticks(range(history.shape[0]))
ax.set_yticklabels(all_filenames.index.tolist())
ax.set_yticks = 0.1
# set 0 to bounding box width
[i.set_linewidth(0.0) for i in ax.spines.itervalues()]
# see http://stackoverflow.com/a/20416681/1435167
# erase x ticks
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
# tic.label1On = tic.label2On = False
# erase y ticks
for tic in ax.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
# tic.label1On = tic.label2On = False
ax2 = fig.add_axes([0.25, .9, 0.5, 0.075])
colors = np.array(def_states.values()).astype('float')
colors[colors == 128] = np.nan
x = range(len(colors))
y = [1 for kk in x]
ax2.scatter(x, y, s = 500, c=colors, alpha=1, marker='o',linewidths = 3, cmap = plt.cm.spectral,vmin = 0, vmax = 255)
ax2.plot(x, y, lw = 3, c='k', zorder=0)
ax2.set_xticks(x)
ax2.set_xticklabels(def_states_explain.values())
ax2.set_xlabel('Legend')
ax2.set_xlim([-.5,len(x)-0.5])
ax2.set_ylim([0.99,1.01])
# set 0 to bounding box width
[i.set_linewidth(0.0) for i in ax2.spines.itervalues()]
# # see http://stackoverflow.com/a/20416681/1435167
# erase x ticks
for tic in ax2.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
# erase y ticks
for tic in ax2.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
fig.savefig('/Users/damo_ma/Desktop/test.png')
# In[23]:
# fake legend
a = np.empty([2,len(def_states)])
a[0,:] = [k for k in def_states.itervalues()]
a[1,:] = a[0,:]
plt.imshow(a,interpolation='nearest',cmap = plt.cm.spectral,vmin = 0, vmax = 255 )
plt.xticks(range(len(def_states)), [k for k in def_states.iterkeys()]);
plt.yticks([1], '');
# In[24]:
fig = plt.figure(figsize=[10,10])
plt.imshow(history,interpolation='nearest',cmap = plt.cm.spectral,vmin = 0, vmax = 255 )
plt.xticks(range(history.shape[1]), all_filenames.columns, rotation='vertical');
plt.xlabel('commits sha-1 (time arrow to the right ->)')
plt.ylabel('file names')
plt.yticks(range(history.shape[0]), all_filenames.index.tolist());
# In[ ]:
| mit |
TeamHG-Memex/eli5 | eli5/sklearn/treeinspect.py | 1 | 2759 | # -*- coding: utf-8 -*-
"""
Inspect scikit-learn decision trees.
This is an alternative to sklearn.tree.export which doesn't require graphviz
and provides a way to output result in text-based format.
"""
from __future__ import absolute_import, division
from sklearn.base import ClassifierMixin
from sklearn.tree import _tree, export_graphviz
from eli5.base import TreeInfo, NodeInfo
def get_tree_info(decision_tree,
feature_names=None,
**export_graphviz_kwargs):
# type: (...) -> TreeInfo
"""
Convert DecisionTreeClassifier or DecisionTreeRegressor
to an inspectable object.
"""
return TreeInfo(
criterion=decision_tree.criterion,
tree=_get_root_node_info(decision_tree, feature_names),
graphviz=tree2dot(decision_tree,
feature_names=feature_names,
**export_graphviz_kwargs),
is_classification=isinstance(decision_tree, ClassifierMixin),
)
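# Usage sketch (dt is a placeholder fitted sklearn DecisionTreeClassifier and
# feature_names a list of column names): the returned TreeInfo exposes the
# criterion, the node tree and a graphviz dot string.
# >>> info = get_tree_info(dt, feature_names=feature_names)
# >>> info.criterion, info.is_classification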
def tree2dot(decision_tree, **export_graphviz_kwargs):
return export_graphviz(decision_tree, out_file=None,
**export_graphviz_kwargs)
def _get_root_node_info(decision_tree, feature_names=None):
# type: (...) -> NodeInfo
res = _get_node_info(decision_tree.tree_, 0)
_add_feature_names(res, feature_names)
return res
def _add_feature_names(root, feature_names=None):
for node in _treeiter(root):
if not node.is_leaf:
feat_id = node.feature_id
if feature_names is None:
node.feature_name = "x%s" % feat_id
else:
node.feature_name = feature_names[feat_id]
def _get_node_info(tree, node_id):
# type: (...) -> NodeInfo
is_leaf = tree.children_left[node_id] == _tree.TREE_LEAF
value = _node_value(tree, node_id)
node = NodeInfo(
id=node_id,
is_leaf=is_leaf,
value=list(value),
value_ratio=list(value / value.sum()),
impurity=tree.impurity[node_id],
samples=tree.n_node_samples[node_id],
sample_ratio=tree.n_node_samples[node_id] / tree.n_node_samples[0],
)
if not is_leaf:
node.feature_id = tree.feature[node_id]
node.threshold = tree.threshold[node_id]
node.left = _get_node_info(tree, tree.children_left[node_id])
node.right = _get_node_info(tree, tree.children_right[node_id])
return node
def _node_value(tree, node_id):
if tree.n_outputs == 1:
return tree.value[node_id][0, :]
else:
return tree.value[node_id]
def _treeiter(node):
yield node
if not node.is_leaf:
for n in _treeiter(node.left):
yield n
for n in _treeiter(node.right):
yield n
| mit |
GuessWhoSamFoo/pandas | asv_bench/benchmarks/indexing.py | 5 | 10167 | import warnings
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, DataFrame, Panel, MultiIndex,
Int64Index, UInt64Index, Float64Index,
IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
class NumericSeriesIndexing(object):
params = [
(Int64Index, UInt64Index, Float64Index),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indices = {
'unique_monotonic_inc': index(range(N)),
'nonunique_monotonic_inc': index(
list(range(55)) + [54] + list(range(55, N - 1))),
}
self.data = Series(np.random.rand(N), index=indices[index_structure])
self.array = np.arange(10000)
self.array_list = self.array.tolist()
def time_getitem_scalar(self, index, index_structure):
self.data[800000]
def time_getitem_slice(self, index, index_structure):
self.data[:800000]
def time_getitem_list_like(self, index, index_structure):
self.data[[800000]]
def time_getitem_array(self, index, index_structure):
self.data[self.array]
def time_getitem_lists(self, index, index_structure):
self.data[self.array_list]
def time_iloc_array(self, index, index_structure):
self.data.iloc[self.array]
def time_iloc_list_like(self, index, index_structure):
self.data.iloc[[800000]]
def time_iloc_scalar(self, index, index_structure):
self.data.iloc[800000]
def time_iloc_slice(self, index, index_structure):
self.data.iloc[:800000]
def time_ix_array(self, index, index_structure):
self.data.ix[self.array]
def time_ix_list_like(self, index, index_structure):
self.data.ix[[800000]]
def time_ix_scalar(self, index, index_structure):
self.data.ix[800000]
def time_ix_slice(self, index, index_structure):
self.data.ix[:800000]
def time_loc_array(self, index, index_structure):
self.data.loc[self.array]
def time_loc_list_like(self, index, index_structure):
self.data.loc[[800000]]
def time_loc_scalar(self, index, index_structure):
self.data.loc[800000]
def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
class NonNumericSeriesIndexing(object):
params = [
('string', 'datetime'),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indexes = {'string': tm.makeStringIndex(N),
'datetime': date_range('1900', periods=N, freq='s')}
index = indexes[index]
if index_structure == 'nonunique_monotonic_inc':
index = index.insert(item=index[2], loc=2)[:-1]
self.s = Series(np.random.rand(N), index=index)
self.lbl = index[80000]
def time_getitem_label_slice(self, index, index_structure):
self.s[:self.lbl]
def time_getitem_pos_slice(self, index, index_structure):
self.s[:80000]
def time_get_value(self, index, index_structure):
with warnings.catch_warnings(record=True):
self.s.get_value(self.lbl)
def time_getitem_scalar(self, index, index_structure):
self.s[self.lbl]
def time_getitem_list_like(self, index, index_structure):
self.s[[self.lbl]]
class DataFrameStringIndexing(object):
def setup(self):
index = tm.makeStringIndex(1000)
columns = tm.makeStringIndex(30)
self.df = DataFrame(np.random.randn(1000, 30), index=index,
columns=columns)
self.idx_scalar = index[100]
self.col_scalar = columns[10]
self.bool_indexer = self.df[self.col_scalar] > 0
self.bool_obj_indexer = self.bool_indexer.astype(object)
def time_get_value(self):
with warnings.catch_warnings(record=True):
self.df.get_value(self.idx_scalar, self.col_scalar)
def time_ix(self):
self.df.ix[self.idx_scalar, self.col_scalar]
def time_loc(self):
self.df.loc[self.idx_scalar, self.col_scalar]
def time_getitem_scalar(self):
self.df[self.col_scalar][self.idx_scalar]
def time_boolean_rows(self):
self.df[self.bool_indexer]
def time_boolean_rows_object(self):
self.df[self.bool_obj_indexer]
class DataFrameNumericIndexing(object):
def setup(self):
self.idx_dupe = np.array(range(30)) * 99
self.df = DataFrame(np.random.randn(10000, 5))
self.df_dup = concat([self.df, 2 * self.df, 3 * self.df])
self.bool_indexer = [True] * 5000 + [False] * 5000
def time_iloc_dups(self):
self.df_dup.iloc[self.idx_dupe]
def time_loc_dups(self):
self.df_dup.loc[self.idx_dupe]
def time_iloc(self):
self.df.iloc[:100, 0]
def time_loc(self):
self.df.loc[:100, 0]
def time_bool_indexer(self):
self.df[self.bool_indexer]
class Take(object):
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': Int64Index(np.arange(N)),
'datetime': date_range('2011-01-01', freq='S', periods=N)}
index = indexes[index]
self.s = Series(np.random.rand(N), index=index)
self.indexer = [True, False, True, True, False] * 20000
def time_take(self, index):
self.s.take(self.indexer)
class MultiIndexing(object):
def setup(self):
mi = MultiIndex.from_product([range(1000), range(1000)])
self.s = Series(np.random.randn(1000000), index=mi)
self.df = DataFrame(self.s)
n = 100000
self.mdt = DataFrame({'A': np.random.choice(range(10000, 45000, 1000),
n),
'B': np.random.choice(range(10, 400), n),
'C': np.random.choice(range(1, 150), n),
'D': np.random.choice(range(10000, 45000), n),
'x': np.random.choice(range(400), n),
'y': np.random.choice(range(25), n)})
self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000]
self.mdt = self.mdt.set_index(['A', 'B', 'C', 'D']).sort_index()
def time_series_ix(self):
self.s.ix[999]
def time_frame_ix(self):
self.df.ix[999]
def time_index_slice(self):
self.mdt.loc[self.idx, :]
class IntervalIndexing(object):
def setup_cache(self):
idx = IntervalIndex.from_breaks(np.arange(1000001))
monotonic = Series(np.arange(1000000), index=idx)
return monotonic
def time_getitem_scalar(self, monotonic):
monotonic[80000]
def time_loc_scalar(self, monotonic):
monotonic.loc[80000]
def time_getitem_list(self, monotonic):
monotonic[80000:]
def time_loc_list(self, monotonic):
monotonic.loc[80000:]
class CategoricalIndexIndexing(object):
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**5
values = list('a' * N + 'b' * N + 'c' * N)
indices = {
'monotonic_incr': CategoricalIndex(values),
'monotonic_decr': CategoricalIndex(reversed(values)),
'non_monotonic': CategoricalIndex(list('abc' * N))}
self.data = indices[index]
self.int_scalar = 10000
self.int_list = list(range(10000))
self.cat_scalar = 'b'
self.cat_list = ['a', 'c']
def time_getitem_scalar(self, index):
self.data[self.int_scalar]
def time_getitem_slice(self, index):
self.data[:self.int_scalar]
def time_getitem_list_like(self, index):
self.data[[self.int_scalar]]
def time_getitem_list(self, index):
self.data[self.int_list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
def time_get_loc_scalar(self, index):
self.data.get_loc(self.cat_scalar)
def time_get_indexer_list(self, index):
self.data.get_indexer(self.cat_list)
class PanelIndexing(object):
def setup(self):
with warnings.catch_warnings(record=True):
self.p = Panel(np.random.randn(100, 100, 100))
self.inds = range(0, 100, 10)
def time_subset(self):
with warnings.catch_warnings(record=True):
self.p.ix[(self.inds, self.inds, self.inds)]
class MethodLookup(object):
def setup_cache(self):
s = Series()
return s
def time_lookup_iloc(self, s):
s.iloc
def time_lookup_ix(self, s):
s.ix
def time_lookup_loc(self, s):
s.loc
class GetItemSingleColumn(object):
def setup(self):
self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A'])
self.df_int_col = DataFrame(np.random.randn(3000, 1))
def time_frame_getitem_single_column_label(self):
self.df_string_col['A']
def time_frame_getitem_single_column_int(self):
self.df_int_col[0]
class AssignTimeseriesIndex(object):
def setup(self):
N = 100000
idx = date_range('1/1/2000', periods=N, freq='H')
self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
def time_frame_assign_timeseries_index(self):
self.df['date'] = self.df.index
class InsertColumns(object):
def setup(self):
self.N = 10**3
self.df = DataFrame(index=range(self.N))
def time_insert(self):
np.random.seed(1234)
for i in range(100):
self.df.insert(0, i, np.random.randn(self.N),
allow_duplicates=True)
def time_assign_with_setitem(self):
np.random.seed(1234)
for i in range(100):
self.df[i] = np.random.randn(self.N)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
aalmah/pylearn2 | pylearn2/models/independent_multiclass_logistic.py | 44 | 2491 | """
Multiclass-classification by taking the max over a set of one-against-rest
logistic classifiers.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import logging
try:
from sklearn.linear_model import LogisticRegression
except ImportError:
LogisticRegression = None
import numpy as np
from theano.compat.six.moves import xrange
logger = logging.getLogger(__name__)
class IndependentMulticlassLogistic:
"""
Fits a separate logistic regression classifier for each class, makes
predictions based on the max output: during training, views a one-hot label
vector as a vector of independent binary labels, rather than correctly
modeling them as one-hot like softmax would do.
This is what Jia+Huang used to get state of the art on CIFAR-100
Parameters
----------
C : WRITEME
"""
def __init__(self, C):
self.C = C
def fit(self, X, y):
"""
Fits the model to the given training data.
Parameters
----------
X : ndarray
2D array, each row is one example
y : ndarray
vector of integer class labels
"""
if LogisticRegression is None:
raise RuntimeError("sklearn not available.")
min_y = y.min()
max_y = y.max()
assert min_y == 0
num_classes = max_y + 1
assert num_classes > 1
logistics = []
for c in xrange(num_classes):
logger.info('fitting class {0}'.format(c))
cur_y = (y == c).astype('int32')
logistics.append(LogisticRegression(C = self.C).fit(X,cur_y))
return Classifier(logistics)
class Classifier:
"""
.. todo::
WRITEME
Parameters
----------
logistics : WRITEME
"""
def __init__(self, logistics):
assert len(logistics) > 1
num_classes = len(logistics)
num_features = logistics[0].coef_.shape[1]
self.W = np.zeros((num_features, num_classes))
self.b = np.zeros((num_classes,))
for i in xrange(num_classes):
self.W[:,i] = logistics[i].coef_
self.b[i] = logistics[i].intercept_
def predict(self, X):
"""
.. todo::
WRITEME
"""
return np.argmax(self.b + np.dot(X,self.W), 1)
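# Usage sketch (X_train, y_train and X_test are placeholder arrays; y_train holds
# integer labels starting at 0): fit one logistic regression per class and predict
# by taking the arg-max over their outputs.
# >>> clf = IndependentMulticlassLogistic(C=1.0).fit(X_train, y_train)
# >>> y_pred = clf.predict(X_test)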
| bsd-3-clause |
samuelcolvin/julia-slideshow | pydata_lightning_2014_7_1/profile_julia_slideshow/ipython_notebook_config.py | 1 | 22905 | # Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = '127.0.0.1'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The base URL for the kernel server
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_kernel_url = '/'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# The base URL for the websocket server, if it differs from the HTTP server
# (hint: it almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u'/home/samuel/.config/ipython'
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to use Browser Side less-css parsing instead of compiled css version in
# templates that allows it. This is mainly convenient when working on the less
# file to avoid a build step, or if the user wants to overwrite some of the less
# variables without having to recompile everything.
#
# You will need to install the less.js component in the static directory either
# in the source tree or in your profile folder.
# c.NotebookApp.use_less = False
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/samuel/.config/ipython'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = '127.0.0.1'
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'samuel'
# The name of the unpacker for deserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The image format for figures with the inline backend.
# c.InlineBackend.figure_format = 'png'
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': 'white', 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': 'white'}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# The directory to use for notebooks.
# c.NotebookManager.notebook_dir = u'/home/samuel/.julia/v0.3/IJulia/deps'
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# The location in which to keep notebook checkpoints
#
# By default, it is notebook-dir/.ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = u''
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
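# Example (illustrative): mirror every notebook to a <notebook-name>.py script
# on save, the config-file equivalent of passing the --script flag.
# c.FileNotebookManager.save_script = True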
# The directory to use for notebooks.
# c.FileNotebookManager.notebook_dir = u'/home/samuel/.julia/v0.3/IJulia/deps'
c.NotebookApp.port = 8998
| mit |
timmie/cartopy | lib/cartopy/crs.py | 2 | 67901 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
The crs module defines Coordinate Reference Systems and the transformations
between them.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractproperty
import math
import warnings
import numpy as np
import shapely.geometry as sgeom
from shapely.prepared import prep
import six
from cartopy._crs import CRS, Geocentric, Geodetic, Globe, PROJ4_RELEASE
import cartopy.trace
__document_these__ = ['CRS', 'Geocentric', 'Geodetic', 'Globe']
WGS84_SEMIMAJOR_AXIS = 6378137.0
WGS84_SEMIMINOR_AXIS = 6356752.3142
class RotatedGeodetic(CRS):
"""
Defines a rotated latitude/longitude coordinate system with spherical
topology and geographical distance.
Coordinates are measured in degrees.
"""
def __init__(self, pole_longitude, pole_latitude,
central_rotated_longitude=0.0, globe=None):
"""
Create a RotatedGeodetic CRS.
The class uses proj4 to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
        globe, then rotating this new CRS about its pole using the
central_rotated_longitude value.
Args:
* pole_longitude - Pole longitude position, in unrotated degrees.
* pole_latitude - Pole latitude position, in unrotated degrees.
* central_rotated_longitude - Longitude rotation about the new
pole, in degrees.
Kwargs:
* globe - An optional :class:`cartopy.crs.Globe`.
Defaults to a "WGS84" datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
globe = globe or Globe(datum='WGS84')
super(RotatedGeodetic, self).__init__(proj4_params, globe=globe)
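# Illustrative sketch (not part of the original module; the _example_* helper
# name is ours): constructing a rotated-pole CRS and mapping the rotated-grid
# origin back to true longitude/latitude. The pole position used is arbitrary.
def _example_rotated_geodetic_usage():
    rotated = RotatedGeodetic(pole_longitude=177.5, pole_latitude=37.5)
    geodetic = Geodetic()
    # Transform the rotated-grid origin (0, 0) into unrotated coordinates.
    lon, lat = geodetic.transform_point(0.0, 0.0, rotated)
    return lon, lat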
class Projection(six.with_metaclass(ABCMeta, CRS)):
"""
Defines a projected coordinate system with flat topology and Euclidean
distance.
"""
_method_map = {
'Point': '_project_point',
'LineString': '_project_line_string',
'LinearRing': '_project_linear_ring',
'Polygon': '_project_polygon',
'MultiPoint': '_project_multipoint',
'MultiLineString': '_project_multiline',
'MultiPolygon': '_project_multipolygon',
}
@abstractproperty
def boundary(self):
pass
@abstractproperty
def threshold(self):
pass
@abstractproperty
def x_limits(self):
pass
@abstractproperty
def y_limits(self):
pass
@property
def cw_boundary(self):
try:
boundary = self._cw_boundary
except AttributeError:
boundary = sgeom.LineString(self.boundary)
self._cw_boundary = boundary
return boundary
@property
def ccw_boundary(self):
try:
boundary = self._ccw_boundary
except AttributeError:
boundary = sgeom.LineString(self.boundary.coords[::-1])
self._ccw_boundary = boundary
return boundary
@property
def domain(self):
try:
domain = self._domain
except AttributeError:
domain = self._domain = sgeom.Polygon(self.boundary)
return domain
def _as_mpl_axes(self):
import cartopy.mpl.geoaxes as geoaxes
return geoaxes.GeoAxes, {'map_projection': self}
def project_geometry(self, geometry, src_crs=None):
"""
Projects the given geometry into this projection.
:param geometry: The geometry to (re-)project.
:param src_crs: The source CRS, or geodetic CRS if None.
:rtype: Shapely geometry.
If src_crs is None, the source CRS is assumed to be a geodetic
version of the target CRS.
"""
if src_crs is None:
src_crs = self.as_geodetic()
elif not isinstance(src_crs, CRS):
raise TypeError('Source CRS must be an instance of CRS'
' or one of its subclasses, or None.')
geom_type = geometry.geom_type
method_name = self._method_map.get(geom_type)
if not method_name:
raise ValueError('Unsupported geometry '
'type {!r}'.format(geom_type))
return getattr(self, method_name)(geometry, src_crs)
def _project_point(self, point, src_crs):
return sgeom.Point(*self.transform_point(point.x, point.y, src_crs))
def _project_line_string(self, geometry, src_crs):
return cartopy.trace.project_linear(geometry, src_crs, self)
def _project_linear_ring(self, linear_ring, src_crs):
"""
Projects the given LinearRing from the src_crs into this CRS and
returns a list of LinearRings and a single MultiLineString.
"""
debug = False
# 1) Resolve the initial lines into projected segments
# 1abc
# def23ghi
# jkl41
multi_line_string = cartopy.trace.project_linear(linear_ring,
src_crs, self)
# Threshold for whether a point is close enough to be the same
# point as another.
threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
# 2) Simplify the segments where appropriate.
if len(multi_line_string) > 1:
# Stitch together segments which are close to continuous.
# This is important when:
# 1) The first source point projects into the map and the
# ring has been cut by the boundary.
# Continuing the example from above this gives:
# def23ghi
# jkl41abc
# 2) The cut ends of segments are too close to reliably
# place into an order along the boundary.
line_strings = list(multi_line_string)
any_modified = False
i = 0
if debug:
first_coord = np.array([ls.coords[0] for ls in line_strings])
last_coord = np.array([ls.coords[-1] for ls in line_strings])
print('Distance matrix:')
np.set_printoptions(precision=2)
x = first_coord[:, np.newaxis, :]
y = last_coord[np.newaxis, :, :]
print(np.abs(x - y).max(axis=-1))
while i < len(line_strings):
modified = False
j = 0
while j < len(line_strings):
if i != j and np.allclose(line_strings[i].coords[0],
line_strings[j].coords[-1],
atol=threshold):
if debug:
print('Joining together {} and {}.'.format(i, j))
last_coords = list(line_strings[j].coords)
first_coords = list(line_strings[i].coords)[1:]
combo = sgeom.LineString(last_coords + first_coords)
if j < i:
i, j = j, i
del line_strings[j], line_strings[i]
line_strings.append(combo)
modified = True
any_modified = True
break
else:
j += 1
if not modified:
i += 1
if any_modified:
multi_line_string = sgeom.MultiLineString(line_strings)
# 3) Check for rings that have been created by the projection stage.
rings = []
line_strings = []
for line in multi_line_string:
if len(line.coords) > 3 and np.allclose(line.coords[0],
line.coords[-1],
atol=threshold):
result_geometry = sgeom.LinearRing(line.coords[:-1])
rings.append(result_geometry)
else:
line_strings.append(line)
# If we found any rings, then we should re-create the multi-line str.
if rings:
multi_line_string = sgeom.MultiLineString(line_strings)
return rings, multi_line_string
def _project_multipoint(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
geoms.append(self._project_point(geom, src_crs))
if geoms:
return sgeom.MultiPoint(geoms)
else:
return sgeom.MultiPoint()
def _project_multiline(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_line_string(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
return sgeom.MultiLineString(geoms)
else:
return []
def _project_multipolygon(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_polygon(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
result = sgeom.MultiPolygon(geoms)
else:
result = sgeom.MultiPolygon()
return result
def _project_polygon(self, polygon, src_crs):
"""
Returns the projected polygon(s) derived from the given polygon.
"""
# Determine orientation of polygon.
# TODO: Consider checking the internal rings have the opposite
# orientation to the external rings?
if src_crs.is_geodetic():
is_ccw = True
else:
is_ccw = polygon.exterior.is_ccw
# Project the polygon exterior/interior rings.
# Each source ring will result in either a ring, or one or more
# lines.
rings = []
multi_lines = []
for src_ring in [polygon.exterior] + list(polygon.interiors):
p_rings, p_mline = self._project_linear_ring(src_ring, src_crs)
if p_rings:
rings.extend(p_rings)
if len(p_mline) > 0:
multi_lines.append(p_mline)
# Convert any lines to rings by attaching them to the boundary.
if multi_lines:
rings.extend(self._attach_lines_to_boundary(multi_lines, is_ccw))
# Resolve all the inside vs. outside rings, and convert to the
# final MultiPolygon.
return self._rings_to_multi_polygon(rings, is_ccw)
def _attach_lines_to_boundary(self, multi_line_strings, is_ccw):
"""
Returns a list of LinearRings by attaching the ends of the given lines
to the boundary, paying attention to the traversal directions of the
lines and boundary.
"""
debug = False
debug_plot_edges = False
# Accumulate all the boundary and segment end points, along with
# their distance along the boundary.
edge_things = []
# Get the boundary as a LineString of the correct orientation
# so we can compute distances along it.
if is_ccw:
boundary = self.ccw_boundary
else:
boundary = self.cw_boundary
def boundary_distance(xy):
return boundary.project(sgeom.Point(*xy))
# Squash all the LineStrings into a single list.
line_strings = []
for multi_line_string in multi_line_strings:
line_strings.extend(multi_line_string)
# Record the positions of all the segment ends
for i, line_string in enumerate(line_strings):
first_dist = boundary_distance(line_string.coords[0])
thing = _BoundaryPoint(first_dist, False,
(i, 'first', line_string.coords[0]))
edge_things.append(thing)
last_dist = boundary_distance(line_string.coords[-1])
thing = _BoundaryPoint(last_dist, False,
(i, 'last', line_string.coords[-1]))
edge_things.append(thing)
# Record the positions of all the boundary vertices
for xy in boundary.coords[:-1]:
point = sgeom.Point(*xy)
dist = boundary.project(point)
thing = _BoundaryPoint(dist, True, point)
edge_things.append(thing)
if debug_plot_edges:
import matplotlib.pyplot as plt
current_fig = plt.gcf()
fig = plt.figure()
# Reset the current figure so we don't upset anything.
plt.figure(current_fig.number)
ax = fig.add_subplot(1, 1, 1)
# Order everything as if walking around the boundary.
# NB. We make line end-points take precedence over boundary points
# to ensure that end-points are still found and followed when they
# coincide.
edge_things.sort(key=lambda thing: (thing.distance, thing.kind))
remaining_ls = dict(enumerate(line_strings))
prev_thing = None
for edge_thing in edge_things[:]:
if (prev_thing is not None and
not edge_thing.kind and
not prev_thing.kind and
edge_thing.data[0] == prev_thing.data[0]):
j = edge_thing.data[0]
                # Insert an edge boundary point in between this geometry.
mid_dist = (edge_thing.distance + prev_thing.distance) * 0.5
mid_point = boundary.interpolate(mid_dist)
new_thing = _BoundaryPoint(mid_dist, True, mid_point)
if debug:
print('Artificially insert boundary: {}'.format(new_thing))
ind = edge_things.index(edge_thing)
edge_things.insert(ind, new_thing)
prev_thing = None
else:
prev_thing = edge_thing
if debug:
print()
print('Edge things')
for thing in edge_things:
print(' ', thing)
if debug_plot_edges:
for thing in edge_things:
if isinstance(thing.data, sgeom.Point):
ax.plot(*thing.data.xy, marker='o')
else:
ax.plot(*thing.data[2], marker='o')
ls = line_strings[thing.data[0]]
coords = np.array(ls.coords)
ax.plot(coords[:, 0], coords[:, 1])
ax.text(coords[0, 0], coords[0, 1], thing.data[0])
ax.text(coords[-1, 0], coords[-1, 1],
'{}.'.format(thing.data[0]))
processed_ls = []
while remaining_ls:
# Rename line_string to current_ls
i, current_ls = remaining_ls.popitem()
if debug:
import sys
sys.stdout.write('+')
sys.stdout.flush()
print()
print('Processing: %s, %s' % (i, current_ls))
# We only want to consider boundary-points, the starts-and-ends of
# all other line-strings, or the start-point of the current
# line-string.
def filter_fn(t):
return (t.kind or
t.data[0] != i or
t.data[1] != 'last')
edge_things = list(filter(filter_fn, edge_things))
added_linestring = set()
while True:
# Find out how far around this linestring's last
# point is on the boundary. We will use this to find
# the next point on the boundary.
d_last = boundary_distance(current_ls.coords[-1])
if debug:
print(' d_last: {!r}'.format(d_last))
next_thing = _find_first_gt(edge_things, d_last)
# Remove this boundary point from the edge.
edge_things.remove(next_thing)
if debug:
print(' next_thing:', next_thing)
if next_thing.kind:
# We've just got a boundary point, add it, and keep going.
if debug:
print(' adding boundary point')
boundary_point = next_thing.data
combined_coords = (list(current_ls.coords) +
[(boundary_point.x, boundary_point.y)])
current_ls = sgeom.LineString(combined_coords)
elif next_thing.data[0] == i and next_thing.data[1] == 'first':
# We've gone all the way around and are now back at the
# first boundary thing.
if debug:
print(' close loop')
processed_ls.append(current_ls)
if debug_plot_edges:
coords = np.array(current_ls.coords)
ax.plot(coords[:, 0], coords[:, 1], color='black',
linestyle='--')
break
else:
if debug:
print(' adding line')
j = next_thing.data[0]
line_to_append = line_strings[j]
if j in remaining_ls:
remaining_ls.pop(j)
coords_to_append = list(line_to_append.coords)
if next_thing.data[1] == 'last':
coords_to_append = coords_to_append[::-1]
# Build up the linestring.
current_ls = sgeom.LineString((list(current_ls.coords) +
coords_to_append))
# Catch getting stuck in an infinite loop by checking that
# linestring only added once.
if j not in added_linestring:
added_linestring.add(j)
else:
if debug_plot_edges:
plt.show()
raise RuntimeError('Unidentified problem with '
'geometry, linestring being '
're-added. Please raise an issue.')
# filter out any non-valid linear rings
processed_ls = [linear_ring for linear_ring in processed_ls if
len(linear_ring.coords) > 2]
linear_rings = [sgeom.LinearRing(line) for line in processed_ls]
if debug:
print(' DONE')
return linear_rings
def _rings_to_multi_polygon(self, rings, is_ccw):
exterior_rings = []
interior_rings = []
for ring in rings:
if ring.is_ccw != is_ccw:
interior_rings.append(ring)
else:
exterior_rings.append(ring)
polygon_bits = []
# Turn all the exterior rings into polygon definitions,
# "slurping up" any interior rings they contain.
for exterior_ring in exterior_rings:
polygon = sgeom.Polygon(exterior_ring)
prep_polygon = prep(polygon)
holes = []
for interior_ring in interior_rings[:]:
if prep_polygon.contains(interior_ring):
holes.append(interior_ring)
interior_rings.remove(interior_ring)
elif polygon.crosses(interior_ring):
# Likely that we have an invalid geometry such as
# that from #509 or #537.
holes.append(interior_ring)
interior_rings.remove(interior_ring)
polygon_bits.append((exterior_ring.coords,
[ring.coords for ring in holes]))
# Any left over "interior" rings need "inverting" with respect
# to the boundary.
if interior_rings:
boundary_poly = self.domain
x3, y3, x4, y4 = boundary_poly.bounds
bx = (x4 - x3) * 0.1
by = (y4 - y3) * 0.1
x3 -= bx
y3 -= by
x4 += bx
y4 += by
for ring in interior_rings:
polygon = sgeom.Polygon(ring)
if polygon.is_valid:
x1, y1, x2, y2 = polygon.bounds
bx = (x2 - x1) * 0.1
by = (y2 - y1) * 0.1
x1 -= bx
y1 -= by
x2 += bx
y2 += by
box = sgeom.box(min(x1, x3), min(y1, y3),
max(x2, x4), max(y2, y4))
# Invert the polygon
polygon = box.difference(polygon)
# Intersect the inverted polygon with the boundary
polygon = boundary_poly.intersection(polygon)
if not polygon.is_empty:
polygon_bits.append(polygon)
if polygon_bits:
multi_poly = sgeom.MultiPolygon(polygon_bits)
else:
multi_poly = sgeom.MultiPolygon()
return multi_poly
def quick_vertices_transform(self, vertices, src_crs):
"""
Where possible, return a vertices array transformed to this CRS from
the given vertices array of shape ``(n, 2)`` and the source CRS.
.. important::
This method may return None to indicate that the vertices cannot
be transformed quickly, and a more complex geometry transformation
is required (see :meth:`cartopy.crs.Projection.project_geometry`).
"""
return_value = None
if self == src_crs:
x = vertices[:, 0]
y = vertices[:, 1]
x_limits = self.x_limits
y_limits = self.y_limits
if (x.min() >= x_limits[0] and x.max() <= x_limits[1] and
y.min() >= y_limits[0] and y.max() <= y_limits[1]):
return_value = vertices
return return_value
class _RectangularProjection(Projection):
"""
The abstract superclass of projections with a rectangular domain which
is symmetric about the origin.
"""
def __init__(self, proj4_params, half_width, half_height, globe=None):
self._half_width = half_width
self._half_height = half_height
super(_RectangularProjection, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
# XXX Should this be a LinearRing?
w, h = self._half_width, self._half_height
return sgeom.LineString([(-w, -h), (-w, h), (w, h), (w, -h), (-w, -h)])
@property
def x_limits(self):
return (-self._half_width, self._half_width)
@property
def y_limits(self):
return (-self._half_height, self._half_height)
class _CylindricalProjection(_RectangularProjection):
"""
The abstract class which denotes cylindrical projections where we
want to allow x values to wrap around.
"""
def _ellipse_boundary(semimajor=2, semiminor=1, easting=0, northing=0, n=201):
"""
Defines a projection boundary using an ellipse.
This type of boundary is used by several projections.
"""
t = np.linspace(0, 2 * np.pi, n)
coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
coords += ([easting], [northing])
return coords[:, ::-1]
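# Illustrative sketch (not part of the original module; the _example_* helper
# name is ours): the helper above returns a (2, n) array of ellipse vertices,
# ordered so that a LinearRing can be built from its transpose. The parameter
# values here are arbitrary.
def _example_ellipse_boundary_shape():
    coords = _ellipse_boundary(semimajor=2, semiminor=1, n=11)
    return coords.shape  # (2, 11)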
class PlateCarree(_CylindricalProjection):
def __init__(self, central_longitude=0.0, globe=None):
proj4_params = [('proj', 'eqc'), ('lon_0', central_longitude)]
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1))
a_rad = math.radians(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
x_max = a_rad * 180
y_max = a_rad * 90
# Set the threshold around 0.5 if the x max is 180.
self._threshold = x_max / 360.
super(PlateCarree, self).__init__(proj4_params, x_max, y_max,
globe=globe)
@property
def threshold(self):
return self._threshold
def _bbox_and_offset(self, other_plate_carree):
"""
Returns a pair of (xmin, xmax) pairs and an offset which can be used
for identification of whether data in ``other_plate_carree`` needs
to be transformed to wrap appropriately.
>>> import cartopy.crs as ccrs
>>> src = ccrs.PlateCarree(central_longitude=10)
>>> bboxes, offset = ccrs.PlateCarree()._bbox_and_offset(src)
>>> print(bboxes)
[[-180.0, -170.0], [-170.0, 180.0]]
>>> print(offset)
10.0
The returned values are longitudes in ``other_plate_carree``'s
coordinate system.
.. important::
The two CRSs must be identical in every way, other than their
central longitudes. No checking of this is done.
"""
self_lon_0 = self.proj4_params['lon_0']
other_lon_0 = other_plate_carree.proj4_params['lon_0']
lon_0_offset = other_lon_0 - self_lon_0
lon_lower_bound_0 = self.x_limits[0]
lon_lower_bound_1 = (other_plate_carree.x_limits[0] + lon_0_offset)
if lon_lower_bound_1 < self.x_limits[0]:
lon_lower_bound_1 += np.diff(self.x_limits)[0]
lon_lower_bound_0, lon_lower_bound_1 = sorted(
[lon_lower_bound_0, lon_lower_bound_1])
bbox = [[lon_lower_bound_0, lon_lower_bound_1],
[lon_lower_bound_1, lon_lower_bound_0]]
bbox[1][1] += np.diff(self.x_limits)[0]
return bbox, lon_0_offset
def quick_vertices_transform(self, vertices, src_crs):
return_value = super(PlateCarree,
self).quick_vertices_transform(vertices, src_crs)
# Optimise the PlateCarree -> PlateCarree case where no
# wrapping or interpolation needs to take place.
if return_value is None and isinstance(src_crs, PlateCarree):
self_params = self.proj4_params.copy()
src_params = src_crs.proj4_params.copy()
self_params.pop('lon_0'), src_params.pop('lon_0')
xs, ys = vertices[:, 0], vertices[:, 1]
potential = (self_params == src_params and
self.y_limits[0] <= ys.min() and
self.y_limits[1] >= ys.max())
if potential:
mod = np.diff(src_crs.x_limits)[0]
bboxes, proj_offset = self._bbox_and_offset(src_crs)
x_lim = xs.min(), xs.max()
y_lim = ys.min(), ys.max()
for poly in bboxes:
# Arbitrarily choose the number of moduli to look
# above and below the -180->180 range. If data is beyond
# this range, we're not going to transform it quickly.
for i in [-1, 0, 1, 2]:
offset = mod * i - proj_offset
if ((poly[0] + offset) <= x_lim[0] and
(poly[1] + offset) >= x_lim[1]):
return_value = vertices + [[-offset, 0]]
break
if return_value is not None:
break
return return_value
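# Illustrative sketch (not part of the original module; the _example_* helper
# name is ours): projecting a geodetic line segment with ``project_geometry``.
# The coordinates are arbitrary and only demonstrate the call pattern.
def _example_project_geometry_usage():
    pc = PlateCarree()
    segment = sgeom.LineString([(-10.0, 50.0), (20.0, 60.0)])
    # Returns a Shapely geometry (typically a MultiLineString here) expressed
    # in PlateCarree coordinates.
    return pc.project_geometry(segment, Geodetic())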
class TransverseMercator(Projection):
"""
A Transverse Mercator projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
scale_factor=1.0, globe=None):
"""
Kwargs:
* central_longitude - The true longitude of the central meridian in
degrees. Defaults to 0.
* central_latitude - The true latitude of the planar origin in
degrees. Defaults to 0.
* false_easting - X offset from the planar origin in metres.
Defaults to 0.
* false_northing - Y offset from the planar origin in metres.
Defaults to 0.
* scale_factor - Scale factor at the central meridian. Defaults
to 1.
* globe - An instance of :class:`cartopy.crs.Globe`. If omitted, a
default globe is created.
"""
proj4_params = [('proj', 'tmerc'), ('lon_0', central_longitude),
('lat_0', central_latitude), ('k', scale_factor),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
super(TransverseMercator, self).__init__(proj4_params, globe=globe)
@property
def threshold(self):
return 1e4
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return (-2e7, 2e7)
@property
def y_limits(self):
return (-1e7, 1e7)
class OSGB(TransverseMercator):
def __init__(self):
super(OSGB, self).__init__(central_longitude=-2, central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=Globe(datum='OSGB36', ellipse='airy'))
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (0, 7e5)
@property
def y_limits(self):
return (0, 13e5)
class OSNI(TransverseMercator):
def __init__(self):
globe = Globe(semimajor_axis=6377340.189,
semiminor_axis=6356034.447938534)
super(OSNI, self).__init__(central_longitude=-8,
central_latitude=53.5,
scale_factor=1.000035,
false_easting=200000,
false_northing=250000,
globe=globe)
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (18814.9667, 386062.3293)
@property
def y_limits(self):
return (11764.8481, 464720.9559)
class UTM(Projection):
"""
Universal Transverse Mercator projection.
"""
def __init__(self, zone, southern_hemisphere=False, globe=None):
"""
Kwargs:
            * zone - the numeric zone of the UTM required.
            * southern_hemisphere - set to True if the zone is in the southern
                                    hemisphere. Defaults to False.
            * globe - An instance of :class:`cartopy.crs.Globe`. If omitted, a
                      default globe is created.
"""
proj4_params = [('proj', 'utm'),
('units', 'm'),
('zone', zone)]
if southern_hemisphere:
proj4_params.append(('south', None))
super(UTM, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def threshold(self):
return 1e2
@property
def x_limits(self):
easting = 5e5
# allow 50% overflow
return (0 - easting/2, 2 * easting + easting/2)
@property
def y_limits(self):
northing = 1e7
# allow 50% overflow
return (0 - northing, 2 * northing + northing/2)
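# Illustrative sketch (not part of the original module; the _example_* helper
# name is ours): transforming a geodetic point into UTM easting/northing in
# metres. Zone 18 south is an arbitrary choice containing the point below.
def _example_utm_usage():
    utm18s = UTM(18, southern_hemisphere=True)
    easting, northing = utm18s.transform_point(-77.0, -12.0, Geodetic())
    return easting, northing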
class EuroPP(UTM):
"""
UTM Zone 32 projection for EuroPP domain.
Ellipsoid is International 1924, Datum is ED50.
"""
def __init__(self):
globe = Globe(ellipse='intl')
super(EuroPP, self).__init__(32, globe=globe)
@property
def x_limits(self):
return (-1.4e6, 2e6)
@property
def y_limits(self):
return (4e6, 7.9e6)
class Mercator(Projection):
"""
A Mercator projection.
"""
def __init__(self, central_longitude=0.0,
min_latitude=-80.0, max_latitude=84.0,
globe=None):
"""
Kwargs:
* central_longitude - the central longitude. Defaults to 0.
* min_latitude - the maximum southerly extent of the projection.
Defaults to -80 degrees.
* max_latitude - the maximum northerly extent of the projection.
Defaults to 84 degrees.
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'merc'),
('lon_0', central_longitude),
('k', 1),
('units', 'm')]
super(Mercator, self).__init__(proj4_params, globe=globe)
# Calculate limits.
limits = self.transform_points(Geodetic(),
np.array([-180,
180]) + central_longitude,
np.array([min_latitude, max_latitude]))
self._xlimits = tuple(limits[..., 0])
self._ylimits = tuple(limits[..., 1])
self._threshold = np.diff(self.x_limits)[0] / 720
def __eq__(self, other):
res = super(Mercator, self).__eq__(other)
if hasattr(other, "_ylimits") and hasattr(other, "_xlimits"):
res = res and self._ylimits == other._ylimits and \
self._xlimits == other._xlimits
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self._xlimits, self._ylimits))
@property
def threshold(self):
return self._threshold
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return self._xlimits
@property
def y_limits(self):
return self._ylimits
# Define a specific instance of a Mercator projection, the Google mercator.
Mercator.GOOGLE = Mercator(min_latitude=-85.0511287798066,
max_latitude=85.0511287798066,
globe=Globe(ellipse=None,
semimajor_axis=WGS84_SEMIMAJOR_AXIS,
semiminor_axis=WGS84_SEMIMAJOR_AXIS,
nadgrids='@null'))
# Deprecated form
GOOGLE_MERCATOR = Mercator.GOOGLE
class LambertCylindrical(_RectangularProjection):
def __init__(self, central_longitude=0.0):
proj4_params = [('proj', 'cea'), ('lon_0', central_longitude)]
globe = Globe(semimajor_axis=math.degrees(1))
super(LambertCylindrical, self).__init__(proj4_params, 180,
math.degrees(1), globe=globe)
@property
def threshold(self):
return 0.5
class LambertConformal(Projection):
"""
A Lambert Conformal conic projection.
"""
def __init__(self, central_longitude=-96.0, central_latitude=39.0,
false_easting=0.0, false_northing=0.0,
secant_latitudes=None, standard_parallels=None,
globe=None, cutoff=-30):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to -96.
* central_latitude - The central latitude. Defaults to 39.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* standard_parallels - Standard parallel latitude(s).
Defaults to (33, 45).
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
* cutoff - Latitude of map cutoff.
The map extends to infinity opposite the central pole
so we must cut off the map drawing before then.
A value of 0 will draw half the globe. Defaults to -30.
"""
proj4_params = [('proj', 'lcc'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if secant_latitudes and standard_parallels:
raise TypeError('standard_parallels replaces secant_latitudes.')
elif secant_latitudes is not None:
warnings.warn('secant_latitudes has been deprecated in v0.12. '
'The standard_parallels keyword can be used as a '
'direct replacement.')
standard_parallels = secant_latitudes
elif standard_parallels is None:
# The default. Put this as a keyword arg default once
# secant_latitudes is removed completely.
standard_parallels = (33, 45)
n_parallels = len(standard_parallels)
if not 1 <= n_parallels <= 2:
raise ValueError('1 or 2 standard parallels must be specified. '
'Got {} ({})'.format(n_parallels,
standard_parallels))
proj4_params.append(('lat_1', standard_parallels[0]))
if n_parallels == 2:
proj4_params.append(('lat_2', standard_parallels[1]))
super(LambertConformal, self).__init__(proj4_params, globe=globe)
# Compute whether this projection is at the "north pole" or the
# "south pole" (after the central lon/lat have been taken into
# account).
if n_parallels == 1:
plat = 90 if standard_parallels[0] > 0 else -90
else:
            # Which pole are the parallels closest to? That is the direction
            # in which the cone converges.
if abs(standard_parallels[0]) > abs(standard_parallels[1]):
poliest_sec = standard_parallels[0]
else:
poliest_sec = standard_parallels[1]
plat = 90 if poliest_sec > 0 else -90
self.cutoff = cutoff
n = 91
lons = [0]
lats = [plat]
lons.extend(np.linspace(central_longitude - 180 + 0.001,
central_longitude + 180 - 0.001, n))
lats.extend(np.array([cutoff] * n))
lons.append(0)
lats.append(plat)
points = self.transform_points(PlateCarree(),
np.array(lons), np.array(lats))
if plat == 90:
# Ensure clockwise
points = points[::-1, :]
self._boundary = sgeom.LineString(points)
bounds = self._boundary.bounds
self._x_limits = bounds[0], bounds[2]
self._y_limits = bounds[1], bounds[3]
def __eq__(self, other):
res = super(LambertConformal, self).__eq__(other)
if hasattr(other, "cutoff"):
res = res and self.cutoff == other.cutoff
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self.cutoff))
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
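# Illustrative sketch (not part of the original module; the _example_* helper
# name is ours): a conic projection with the default parallels but cut off at
# the equator rather than the default -30 degrees. Values are illustrative.
def _example_lambert_conformal_cutoff():
    lcc = LambertConformal(standard_parallels=(33, 45), cutoff=0)
    return lcc.boundary.bounds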
class LambertAzimuthalEqualArea(Projection):
"""
A Lambert Azimuthal Equal-Area projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to 0.
* central_latitude - The central latitude. Defaults to 0.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'laea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
super(LambertAzimuthalEqualArea, self).__init__(proj4_params,
globe=globe)
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
lon, lat = central_longitude + 180, - central_latitude + 0.01
x, max_y = self.transform_point(lon, lat, PlateCarree())
coords = _ellipse_boundary(a * 1.9999, max_y - false_northing,
false_easting, false_northing, 61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
self._x_limits = self._boundary.bounds[::2]
self._y_limits = self._boundary.bounds[1::2]
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Miller(_RectangularProjection):
def __init__(self, central_longitude=0.0):
proj4_params = [('proj', 'mill'), ('lon_0', central_longitude)]
globe = Globe(semimajor_axis=math.degrees(1))
# XXX How can we derive the vertical limit of 131.98?
super(Miller, self).__init__(proj4_params, 180, 131.98, globe=globe)
@property
def threshold(self):
return 0.5
class RotatedPole(_CylindricalProjection):
"""
Defines a rotated latitude/longitude projected coordinate system
with cylindrical topology and projected distance.
Coordinates are measured in projection metres.
"""
def __init__(self, pole_longitude=0.0, pole_latitude=90.0,
central_rotated_longitude=0.0, globe=None):
"""
Create a RotatedPole CRS.
The class uses proj4 to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
        globe, then rotating this new CRS about its pole using the
central_rotated_longitude value.
Args:
* pole_longitude - Pole longitude position, in unrotated degrees.
* pole_latitude - Pole latitude position, in unrotated degrees.
* central_rotated_longitude - Longitude rotation about the new
pole, in degrees.
Kwargs:
* globe - An optional :class:`cartopy.crs.Globe`.
Defaults to a "WGS84" datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
super(RotatedPole, self).__init__(proj4_params, 180, 90, globe=globe)
@property
def threshold(self):
return 0.5
class Gnomonic(Projection):
def __init__(self, central_latitude=0.0, globe=None):
proj4_params = [('proj', 'gnom'), ('lat_0', central_latitude)]
super(Gnomonic, self).__init__(proj4_params, globe=globe)
self._max = 5e7
@property
def boundary(self):
return sgeom.Point(0, 0).buffer(self._max).exterior
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return (-self._max, self._max)
@property
def y_limits(self):
return (-self._max, self._max)
class Stereographic(Projection):
def __init__(self, central_latitude=0.0, central_longitude=0.0,
false_easting=0.0, false_northing=0.0,
true_scale_latitude=None, globe=None):
proj4_params = [('proj', 'stere'), ('lat_0', central_latitude),
('lon_0', central_longitude),
('x_0', false_easting), ('y_0', false_northing)]
if true_scale_latitude:
proj4_params.append(('lat_ts', true_scale_latitude))
super(Stereographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
# Note: The magic number has been picked to maintain consistent
# behaviour with a wgs84 globe. There is no guarantee that the scaling
# should even be linear.
x_axis_offset = 5e7 / WGS84_SEMIMAJOR_AXIS
y_axis_offset = 5e7 / WGS84_SEMIMINOR_AXIS
self._x_limits = (-a * x_axis_offset + false_easting,
a * x_axis_offset + false_easting)
self._y_limits = (-b * y_axis_offset + false_northing,
b * y_axis_offset + false_northing)
if self._x_limits[1] == self._y_limits[1]:
point = sgeom.Point(false_easting, false_northing)
self._boundary = point.buffer(self._x_limits[1]).exterior
else:
coords = _ellipse_boundary(self._x_limits[1], self._y_limits[1],
false_easting, false_northing, 91)
self._boundary = sgeom.LinearRing(coords.T)
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class NorthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, globe=None):
super(NorthPolarStereo, self).__init__(
central_latitude=90,
central_longitude=central_longitude, globe=globe)
class SouthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, globe=None):
super(SouthPolarStereo, self).__init__(
central_latitude=-90,
central_longitude=central_longitude, globe=globe)
class Orthographic(Projection):
def __init__(self, central_longitude=0.0, central_latitude=0.0,
globe=None):
proj4_params = [('proj', 'ortho'), ('lon_0', central_longitude),
('lat_0', central_latitude)]
super(Orthographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
if b != a:
warnings.warn('The proj4 "ortho" projection does not appear to '
'handle elliptical globes.')
# To stabilise the projection of geometries, we reduce the boundary by
# a tiny fraction at the cost of the extreme edges.
coords = _ellipse_boundary(a * 0.99999, b * 0.99999, n=61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
self._xlim = self._boundary.bounds[::2]
self._ylim = self._boundary.bounds[1::2]
self._threshold = np.diff(self._xlim)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._xlim
@property
def y_limits(self):
return self._ylim
class _WarpedRectangularProjection(Projection):
def __init__(self, proj4_params, central_longitude, globe=None):
super(_WarpedRectangularProjection, self).__init__(proj4_params,
globe=globe)
# Obtain boundary points
points = []
n = 91
geodetic_crs = self.as_geodetic()
for lat in np.linspace(-90, 90, n):
points.append(
self.transform_point(180 + central_longitude,
lat, geodetic_crs)
)
for lat in np.linspace(90, -90, n):
points.append(
self.transform_point(-180 + central_longitude,
lat, geodetic_crs)
)
points.append(
self.transform_point(180 + central_longitude, -90, geodetic_crs))
self._boundary = sgeom.LineString(points[::-1])
x = [p[0] for p in points]
y = [p[1] for p in points]
self._x_limits = min(x), max(x)
self._y_limits = min(y), max(y)
@property
def boundary(self):
return self._boundary
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Mollweide(_WarpedRectangularProjection):
def __init__(self, central_longitude=0, globe=None):
proj4_params = [('proj', 'moll'), ('lon_0', central_longitude)]
super(Mollweide, self).__init__(proj4_params, central_longitude,
globe=globe)
@property
def threshold(self):
return 1e5
class Robinson(_WarpedRectangularProjection):
def __init__(self, central_longitude=0, globe=None):
# Warn when using Robinson with proj4 4.8 due to discontinuity at
# 40 deg N introduced by incomplete fix to issue #113 (see
# https://trac.osgeo.org/proj/ticket/113).
import re
match = re.search(r"\d\.\d", PROJ4_RELEASE)
if match is not None:
proj4_version = float(match.group())
if 4.8 <= proj4_version < 4.9:
warnings.warn('The Robinson projection in the v4.8.x series '
'of Proj.4 contains a discontinuity at '
'40 deg latitude. Use this projection with '
'caution.')
else:
warnings.warn('Cannot determine Proj.4 version. The Robinson '
'projection may be unreliable and should be used '
'with caution.')
proj4_params = [('proj', 'robin'), ('lon_0', central_longitude)]
super(Robinson, self).__init__(proj4_params, central_longitude,
globe=globe)
@property
def threshold(self):
return 1e4
def transform_point(self, x, y, src_crs):
"""
Capture and handle any input NaNs, else invoke parent function,
:meth:`_WarpedRectangularProjection.transform_point`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
.. note::
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
"""
if np.isnan(x) or np.isnan(y):
result = (np.nan, np.nan)
else:
result = super(Robinson, self).transform_point(x, y, src_crs)
return result
def transform_points(self, src_crs, x, y, z=None):
"""
Capture and handle NaNs in input points -- else as parent function,
:meth:`_WarpedRectangularProjection.transform_points`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
.. note::
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
Instead, we invalidate any of the points that contain a NaN.
"""
input_point_nans = np.isnan(x) | np.isnan(y)
if z is not None:
input_point_nans |= np.isnan(z)
handle_nans = np.any(input_point_nans)
if handle_nans:
# Remove NaN points from input data to avoid the error.
x[input_point_nans] = 0.0
y[input_point_nans] = 0.0
if z is not None:
z[input_point_nans] = 0.0
result = super(Robinson, self).transform_points(src_crs, x, y, z)
if handle_nans:
# Result always has shape (N, 3).
# Blank out each (whole) point where we had a NaN in the input.
result[input_point_nans] = np.nan
return result
class InterruptedGoodeHomolosine(Projection):
def __init__(self, central_longitude=0, globe=None):
proj4_params = [('proj', 'igh'), ('lon_0', central_longitude)]
super(InterruptedGoodeHomolosine, self).__init__(proj4_params,
globe=globe)
# Obtain boundary points
points = []
n = 31
geodetic_crs = self.as_geodetic()
# Right boundary
for lat in np.linspace(-90, 90, n):
points.append(self.transform_point(180 + central_longitude,
lat, geodetic_crs))
# Top boundary
interrupted_lons = (-40.0,)
delta = 0.001
for lon in interrupted_lons:
for lat in np.linspace(90, 0, n):
points.append(self.transform_point(lon + delta +
central_longitude,
lat, geodetic_crs))
for lat in np.linspace(0, 90, n):
points.append(self.transform_point(lon - delta +
central_longitude,
lat, geodetic_crs))
# Left boundary
for lat in np.linspace(90, -90, n):
points.append(self.transform_point(-180 + central_longitude,
lat, geodetic_crs))
# Bottom boundary
interrupted_lons = (-100.0, -20.0, 80.0)
delta = 0.001
for lon in interrupted_lons:
for lat in np.linspace(-90, 0, n):
points.append(self.transform_point(lon - delta +
central_longitude,
lat, geodetic_crs))
for lat in np.linspace(0, -90, n):
points.append(self.transform_point(lon + delta +
central_longitude,
lat, geodetic_crs))
# Close loop
points.append(self.transform_point(180 + central_longitude, -90,
geodetic_crs))
self._boundary = sgeom.LineString(points[::-1])
x = [p[0] for p in points]
y = [p[1] for p in points]
self._x_limits = min(x), max(x)
self._y_limits = min(y), max(y)
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 2e4
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Geostationary(Projection):
def __init__(self, central_longitude=0.0, satellite_height=35785831,
false_easting=0, false_northing=0, globe=None):
proj4_params = [('proj', 'geos'), ('lon_0', central_longitude),
('lat_0', 0), ('h', satellite_height),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
super(Geostationary, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
h = np.float(satellite_height)
max_x = h * math.atan(a / (a + h))
max_y = h * math.atan(b / (b + h))
coords = _ellipse_boundary(max_x, max_y,
false_easting, false_northing, 61)
self._boundary = sgeom.LinearRing(coords.T)
self._xlim = self._boundary.bounds[::2]
self._ylim = self._boundary.bounds[1::2]
self._threshold = np.diff(self._xlim)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._xlim
@property
def y_limits(self):
return self._ylim
class AlbersEqualArea(Projection):
"""
An Albers Equal Area projection
This projection is conic and equal-area, and is commonly used for maps of
the conterminous United States.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
standard_parallels=(20.0, 50.0), globe=None):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to 0.
* central_latitude - The central latitude. Defaults to 0.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* standard_parallels - The one or two latitudes of correct scale.
Defaults to (20, 50).
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'aea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if standard_parallels is not None:
try:
proj4_params.append(('lat_1', standard_parallels[0]))
try:
proj4_params.append(('lat_2', standard_parallels[1]))
except IndexError:
pass
except TypeError:
proj4_params.append(('lat_1', standard_parallels))
super(AlbersEqualArea, self).__init__(proj4_params, globe=globe)
# bounds
n = 103
lons = np.empty(2 * n + 1)
lats = np.empty(2 * n + 1)
tmp = np.linspace(central_longitude - 180, central_longitude + 180, n)
lons[:n] = tmp
lats[:n] = 90
lons[n:-1] = tmp[::-1]
lats[n:-1] = -90
lons[-1] = lons[0]
lats[-1] = lats[0]
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LineString(points)
bounds = self._boundary.bounds
self._x_limits = bounds[0], bounds[2]
self._y_limits = bounds[1], bounds[3]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class AzimuthalEquidistant(Projection):
"""
An Azimuthal Equidistant projection
This projection provides accurate angles about and distances through the
central position. Other angles, distances, or areas may be distorted.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Kwargs:
* central_longitude - The true longitude of the central meridian in
degrees. Defaults to 0.
* central_latitude - The true latitude of the planar origin in
degrees. Defaults to 0.
* false_easting - X offset from the planar origin in metres.
Defaults to 0.
* false_northing - Y offset from the planar origin in metres.
Defaults to 0.
* globe - An instance of :class:`cartopy.crs.Globe`. If omitted, a
default globe is created.
"""
proj4_params = [('proj', 'aeqd'), ('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting), ('y_0', false_northing)]
super(AzimuthalEquidistant, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
coords = _ellipse_boundary(a * np.pi, b * np.pi,
false_easting, false_northing, 61)
self._boundary = sgeom.LinearRing(coords.T)
bounds = self._boundary.bounds
self._x_limits = bounds[0], bounds[2]
self._y_limits = bounds[1], bounds[3]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Sinusoidal(Projection):
"""
A Sinusoidal projection.
This projection is equal-area.
"""
def __init__(self, central_longitude=0.0, false_easting=0.0,
false_northing=0.0, globe=None):
"""
Kwargs:
* central_longitude - The central longitude. Defaults to 0.
* false_easting - X offset from planar origin in metres.
Defaults to 0.
* false_northing - Y offset from planar origin in metres.
Defaults to 0.
* globe - A :class:`cartopy.crs.Globe`.
If omitted, a default globe is created.
"""
proj4_params = [('proj', 'sinu'),
('lon_0', central_longitude),
('x_0', false_easting),
('y_0', false_northing)]
super(Sinusoidal, self).__init__(proj4_params, globe=globe)
# Obtain boundary points
points = []
n = 91
geodetic_crs = self.as_geodetic()
for lat in np.linspace(-90, 90, n):
points.append(
self.transform_point(180 + central_longitude,
lat, geodetic_crs)
)
for lat in np.linspace(90, -90, n):
points.append(
self.transform_point(-180 + central_longitude,
lat, geodetic_crs)
)
points.append(
self.transform_point(180 + central_longitude, -90, geodetic_crs))
self._boundary = sgeom.LineString(points[::-1])
minx, miny, maxx, maxy = self._boundary.bounds
self._x_limits = minx, maxx
self._y_limits = miny, maxy
self._threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
# MODIS data products use a Sinusoidal projection of a spherical Earth
# http://modis-land.gsfc.nasa.gov/GCTP.html
Sinusoidal.MODIS = Sinusoidal(globe=Globe(ellipse=None,
semimajor_axis=6371007.181,
semiminor_axis=6371007.181))
class _BoundaryPoint(object):
def __init__(self, distance, kind, data):
"""
A representation for a geometric object which is
connected to the boundary.
Parameters
==========
distance - float
The distance along the boundary that this object
can be found.
kind - bool
Whether this object represents a point from the pre-computed
boundary.
data - point or namedtuple
The actual data that this boundary object represents.
"""
self.distance = distance
self.kind = kind
self.data = data
def __repr__(self):
return '_BoundaryPoint(%r, %r, %s)' % (self.distance, self.kind,
self.data)
def _find_first_gt(a, x):
for v in a:
if v.distance > x:
return v
# We've gone all the way around, so pick the first point again.
return a[0]
def epsg(code):
"""
Return the projection which corresponds to the given EPSG code.
The EPSG code must correspond to a "projected coordinate system",
so EPSG codes such as 4326 (WGS-84) which define a "geodetic coordinate
system" will not work.
.. note::
The conversion is performed by querying https://epsg.io/ so a
live internet connection is required.
"""
import cartopy._epsg
return cartopy._epsg._EPSGProjection(code)
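# Illustrative sketch (not part of the original module; the _example_* helper
# name is ours): resolving a projected CRS from an EPSG code. 32718
# ("WGS 84 / UTM zone 18S") is only an example, and the call performs a live
# query to epsg.io as noted above.
def _example_epsg_lookup():
    return epsg(32718)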
| gpl-3.0 |
DangoMelon0701/PyRemote-Sensing | NETCDF scripts/QuikSCAT/wndstrss_read_nc.py | 1 | 3641 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 10 15:23:19 2017
@author: DangoMelon0701
"""
from scipy.io import netcdf
import os
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
#%%
def plot_data(data,cbar=0,save_img=0,name='image'):
norm = mpl.colors.Normalize(vmin=-0.5, vmax=0.5)
cmap = mpl.cm.get_cmap('jet')
plot,axs = plt.subplots()
raw_data = axs.imshow(data,interpolation="gaussian",cmap=cmap,norm=norm)
if cbar == 1:
cbar = plot.colorbar(raw_data)
if save_img == 1:
plt.savefig("{}.png".format(name),dpi=1000,bbox_inches='tight')
#%%
def rmse(predictions, targets):
return np.sqrt(np.nanmean((predictions - targets) ** 2))
#%%
list_files = []
drag_c = 1e-3
air_ro = 1.2
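# The constants above assume a constant drag coefficient C_d = 1.0e-3 and an
# air density rho_air = 1.2 kg m^-3; the bulk formula applied below is
#   tau_i = rho_air * C_d * u_i * |U|   for each wind component u_i.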
for files in os.listdir(os.getcwd()):
if files.endswith('.nc'):
list_files.append(files)
for nc_files in list_files:
open_file = netcdf.NetCDFFile(nc_files,'r')
    # Zonal wind speed
znl_wnd_speed = open_file.variables['zonal_wind_speed']
np_znl_speed = znl_wnd_speed[:]*znl_wnd_speed.scale_factor
np_znl_speed[np.where(np_znl_speed==np_znl_speed.max())]=np.nan
    # Meridional wind speed
mrdnl_wnd_speed = open_file.variables['meridional_wind_speed']
np_mrdnl_speed = mrdnl_wnd_speed[:]*mrdnl_wnd_speed.scale_factor
np_mrdnl_speed[np.where(np_mrdnl_speed==np_mrdnl_speed.max())]=np.nan
    # Magnitude of the wind velocity vector
np_wnd_module = np.sqrt(np.power(np_znl_speed,2)+np.power(np_mrdnl_speed,2))
    # Compute the wind stress
tao_x = air_ro*drag_c*np_znl_speed*np_wnd_module
tao_y = air_ro*drag_c*np_mrdnl_speed*np_wnd_module
    # Read the provided stress data for comparison
znl_wnd_stress = open_file.variables['zonal_wind_stress']
np_znl_stress = znl_wnd_stress[:]*znl_wnd_stress.scale_factor
np_znl_stress[np.where(np_znl_stress==np_znl_stress.max())]=np.nan
np_znl_stress[np.where(np_znl_stress==np.nanmax(np_znl_stress))]=np.nan
np_znl_stress[np.where(np_znl_stress==np.nanmin(np_znl_stress))]=np.nan
mrdnl_wnd_stress = open_file.variables['meridional_wind_stress']
np_mrdnl_stress = mrdnl_wnd_stress[:]*mrdnl_wnd_stress.scale_factor
np_mrdnl_stress[np.where(np_mrdnl_stress==np_mrdnl_stress.max())]=np.nan
np_mrdnl_stress[np.where(np_mrdnl_stress==np.nanmax(np_mrdnl_stress))]=np.nan
np_mrdnl_stress[np.where(np_mrdnl_stress==np.nanmin(np_mrdnl_stress))]=np.nan
open_file.close()
#%%
    # Scatter plot to compare the data
x_limit = 0.45
y_limit = 0.45
fig, axes = plt.subplots(figsize=(7,7))
    # 1:1 line
_11line = np.linspace(0,x_limit,2)
axes.plot(_11line,_11line, color ='black',lw=0.6)
#scatter plot
axes.scatter(np_mrdnl_stress,tao_y,edgecolors="black",linewidth=0.1,s=10)
axes.axis([0,x_limit,0,y_limit],fontsize=10)
    # Axis labels and figure title
fig.suptitle('QuikSCAT {}'.format(nc_files),fontsize=16)
axes.set_ylabel('Computed Meridional Wind Stress',fontsize=16)
axes.set_xlabel('QuikSCAT Meridional Wind Stress',fontsize=16)
    # Additional statistics
rmse_value = rmse(np_mrdnl_stress,tao_y)
axes.text(0.08,0.78,"(RMSE={:.3f})".format(rmse_value),fontsize=13.5,\
transform=axes.transAxes)
axes.grid(linestyle='--')
    # Save the figure
fig.savefig("mrdstrss_scatter_plot.png",dpi=1000,bbox_inches='tight')
#%%
plot_data(tao_x,1,1,'zonal_strees_calculated')
plot_data(np_znl_stress,1,1,'zonal_stress_given')
plot_data(tao_y,1,1,'meridional_strees_calculated')
plot_data(np_mrdnl_stress,1,1,'meridional_stress_given')
#%%
break
| mit |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/estimators/classifier_test.py | 16 | 5175 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.session_bundle import manifest_pb2
def iris_input_fn(num_epochs=None):
iris = tf.contrib.learn.datasets.load_iris()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(iris.data), [-1, 4]), num_epochs=num_epochs)
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def logistic_model_fn(features, labels, unused_mode):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_params_fn(features, labels, unused_mode, params):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
self._runIrisAll(est)
def testIrisAllWithParams(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_params_fn,
n_classes=3,
params={'learning_rate': 0.01})
self._runIrisAll(est)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predict_input_fn = functools.partial(iris_input_fn, num_epochs=1)
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEqual(len(predictions), iris.target.shape[0])
def _runIrisAll(self, est):
iris = tf.contrib.learn.datasets.load_iris()
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = list(est.predict(x=iris.data))
predictions_proba = list(est.predict_proba(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
# Disable this test case until b/31032996 is fixed.
def _testExportMonitorRegressionSignature(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=tf.contrib.learn.classifier.classification_signature_fn)
est.fit(iris.data, iris.target, steps=2, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000002/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000002/export.meta')
self.assertTrue(signature.HasField('classification_signature'))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
jreback/pandas | pandas/tests/resample/test_datetime_index.py | 2 | 59676 | from datetime import datetime
from functools import partial
from io import StringIO
import numpy as np
import pytest
import pytz
from pandas._libs import lib
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna
import pandas._testing as tm
from pandas.core.groupby.grouper import Grouper
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, period_range
from pandas.core.resample import DatetimeIndex, _get_timestamp_range_edges
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import Minute
@pytest.fixture()
def _index_factory():
return date_range
@pytest.fixture
def _index_freq():
return "Min"
@pytest.fixture
def _static_values(index):
return np.random.rand(len(index))
def test_custom_grouper(index):
dti = index
s = Series(np.array([1] * len(dti)), index=dti, dtype="int64")
b = Grouper(freq=Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "ohlc", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
b = Grouper(freq=Minute(5), closed="right", label="right")
g = s.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "ohlc", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
idx = DatetimeIndex(idx, freq="5T")
expect = Series(arr, index=idx)
    # GH2763 - return input dtype if we can
result = g.agg(np.sum)
tm.assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10), index=dti, dtype="float64")
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"closed, expected",
[
(
"right",
lambda s: Series(
[s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
),
),
(
"left",
lambda s: Series(
[s[:5].mean(), s[5:10].mean(), s[10:].mean()],
index=date_range(
"1/1/2000 00:05", periods=3, freq="5min", name="index"
),
),
),
],
)
def test_resample_basic(series, closed, expected):
s = series
expected = expected(s)
result = s.resample("5min", closed=closed, label="right").mean()
tm.assert_series_equal(result, expected)
def test_resample_integerarray():
# GH 25580, resample on IntegerArray
ts = Series(
range(9), index=pd.date_range("1/1/2000", periods=9, freq="T"), dtype="Int64"
)
result = ts.resample("3T").sum()
expected = Series(
[3, 12, 21],
index=pd.date_range("1/1/2000", periods=3, freq="3T"),
dtype="Int64",
)
tm.assert_series_equal(result, expected)
result = ts.resample("3T").mean()
expected = Series(
[1, 4, 7],
index=pd.date_range("1/1/2000", periods=3, freq="3T"),
dtype="Float64",
)
tm.assert_series_equal(result, expected)
def test_resample_basic_grouper(series):
s = series
result = s.resample("5Min").last()
grouper = Grouper(freq=Minute(5), closed="left", label="left")
expected = s.groupby(grouper).agg(lambda x: x[-1])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
@pytest.mark.parametrize(
"keyword,value",
[("label", "righttt"), ("closed", "righttt"), ("convention", "starttt")],
)
def test_resample_string_kwargs(series, keyword, value):
# see gh-19303
# Check that wrong keyword argument strings raise an error
msg = f"Unsupported value {value} for `{keyword}`"
with pytest.raises(ValueError, match=msg):
series.resample("5min", **({keyword: value}))
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how(series, downsample_method):
if downsample_method == "ohlc":
pytest.skip("covered by test_resample_how_ohlc")
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
expected = s.groupby(grouplist).agg(downsample_method)
expected.index = date_range("1/1/2000", periods=4, freq="5min", name="index")
result = getattr(
s.resample("5min", closed="right", label="right"), downsample_method
)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"_index_start,_index_end,_index_name",
[("1/1/2000 00:00:00", "1/1/2000 00:13:00", "index")],
)
def test_resample_how_ohlc(series):
s = series
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = DataFrame(
s.groupby(grouplist).agg(_ohlc).values.tolist(),
index=date_range("1/1/2000", periods=4, freq="5min", name="index"),
columns=["open", "high", "low", "close"],
)
result = s.resample("5min", closed="right", label="right").ohlc()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max", "sum", "prod", "mean", "var", "std"])
def test_numpy_compat(func):
# see gh-12811
s = Series([1, 2, 3, 4, 5], index=date_range("20130101", periods=5, freq="s"))
r = s.resample("2s")
msg = "numpy operations are not valid with resample"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(func, 1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, func)(axis=1)
def test_resample_how_callables():
# GH#7929
data = np.arange(5, dtype=np.int64)
ind = date_range(start="2014-01-01", periods=len(data), freq="d")
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class FnClass:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(FnClass())
tm.assert_frame_equal(df_standard, df_lambda)
tm.assert_frame_equal(df_standard, df_partial)
tm.assert_frame_equal(df_standard, df_partial2)
tm.assert_frame_equal(df_standard, df_class)
def test_resample_rounding():
# GH 8371
# odd results when rounding is needed
data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""
df = pd.read_csv(
StringIO(data),
parse_dates={"timestamp": ["date", "time"]},
index_col="timestamp",
)
df.index.name = None
result = df.resample("6s").sum()
expected = DataFrame(
{"value": [4, 9, 4, 2]}, index=date_range("2014-11-08", freq="6s", periods=4)
)
tm.assert_frame_equal(result, expected)
result = df.resample("7s").sum()
expected = DataFrame(
{"value": [4, 10, 4, 1]}, index=date_range("2014-11-08", freq="7s", periods=4)
)
tm.assert_frame_equal(result, expected)
result = df.resample("11s").sum()
expected = DataFrame(
{"value": [11, 8]}, index=date_range("2014-11-08", freq="11s", periods=2)
)
tm.assert_frame_equal(result, expected)
result = df.resample("13s").sum()
expected = DataFrame(
{"value": [13, 6]}, index=date_range("2014-11-08", freq="13s", periods=2)
)
tm.assert_frame_equal(result, expected)
result = df.resample("17s").sum()
expected = DataFrame(
{"value": [16, 3]}, index=date_range("2014-11-08", freq="17s", periods=2)
)
tm.assert_frame_equal(result, expected)
def test_resample_basic_from_daily():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to weekly
result = s.resample("w-sun").last()
assert len(result) == 3
assert (result.index.dayofweek == [6, 6, 6]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/9/2005"]
assert result.iloc[2] == s.iloc[-1]
result = s.resample("W-MON").last()
assert len(result) == 2
assert (result.index.dayofweek == [0, 0]).all()
assert result.iloc[0] == s["1/3/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-TUE").last()
assert len(result) == 2
assert (result.index.dayofweek == [1, 1]).all()
assert result.iloc[0] == s["1/4/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-WED").last()
assert len(result) == 2
assert (result.index.dayofweek == [2, 2]).all()
assert result.iloc[0] == s["1/5/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-THU").last()
assert len(result) == 2
assert (result.index.dayofweek == [3, 3]).all()
assert result.iloc[0] == s["1/6/2005"]
assert result.iloc[1] == s["1/10/2005"]
result = s.resample("W-FRI").last()
assert len(result) == 2
assert (result.index.dayofweek == [4, 4]).all()
assert result.iloc[0] == s["1/7/2005"]
assert result.iloc[1] == s["1/10/2005"]
# to biz day
result = s.resample("B").last()
assert len(result) == 7
assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()
assert result.iloc[0] == s["1/2/2005"]
assert result.iloc[1] == s["1/3/2005"]
assert result.iloc[5] == s["1/9/2005"]
assert result.index.name == "index"
def test_resample_upsampling_picked_but_not_correct():
# Test for issue #3020
dates = date_range("01-Jan-2014", "05-Jan-2014", freq="D")
series = Series(1, index=dates)
result = series.resample("D").mean()
assert result.index[0] == dates[0]
# GH 5955
    # incorrectly deciding to upsample when the axis frequency matches the
# resample frequency
s = Series(
np.arange(1.0, 6), index=[datetime(1975, 1, i, 12, 0) for i in range(1, 6)]
)
expected = Series(
np.arange(1.0, 6), index=date_range("19750101", periods=5, freq="D")
)
result = s.resample("D").count()
tm.assert_series_equal(result, Series(1, index=expected.index))
result1 = s.resample("D").sum()
result2 = s.resample("D").mean()
tm.assert_series_equal(result1, expected)
tm.assert_series_equal(result2, expected)
def test_resample_frame_basic():
df = tm.makeTimeDataFrame()
b = Grouper(freq="M")
g = df.groupby(b)
# check all cython functions work
funcs = ["add", "mean", "prod", "min", "max", "var"]
for f in funcs:
g._cython_agg_general(f)
result = df.resample("A").mean()
tm.assert_series_equal(result["A"], df["A"].resample("A").mean())
result = df.resample("M").mean()
tm.assert_series_equal(result["A"], df["A"].resample("M").mean())
df.resample("M", kind="period").mean()
df.resample("W-WED", kind="period").mean()
def test_resample_upsample():
# from daily
dti = date_range(
start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D", name="index"
)
s = Series(np.random.rand(len(dti)), dti)
# to minutely, by padding
result = s.resample("Min").pad()
assert len(result) == 12961
assert result[0] == s[0]
assert result[-1] == s[-1]
assert result.index.name == "index"
def test_resample_how_method():
# GH9915
s = Series(
[11, 22],
index=[
Timestamp("2015-03-31 21:48:52.672000"),
Timestamp("2015-03-31 21:49:52.739000"),
],
)
expected = Series(
[11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
index=DatetimeIndex(
[
Timestamp("2015-03-31 21:48:50"),
Timestamp("2015-03-31 21:49:00"),
Timestamp("2015-03-31 21:49:10"),
Timestamp("2015-03-31 21:49:20"),
Timestamp("2015-03-31 21:49:30"),
Timestamp("2015-03-31 21:49:40"),
Timestamp("2015-03-31 21:49:50"),
],
freq="10s",
),
)
tm.assert_series_equal(s.resample("10S").mean(), expected)
def test_resample_extra_index_point():
# GH#9756
index = date_range(start="20150101", end="20150331", freq="BM")
expected = DataFrame({"A": Series([21, 41, 63], index=index)})
index = date_range(start="20150101", end="20150331", freq="B")
df = DataFrame({"A": Series(range(len(index)), index=index)}, dtype="int64")
result = df.resample("BM").last()
tm.assert_frame_equal(result, expected)
def test_upsample_with_limit():
rng = date_range("1/1/2000", periods=3, freq="5t")
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample("t").ffill(limit=2)
expected = ts.reindex(result.index, method="ffill", limit=2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("freq", ["5D", "10H", "5Min", "10S"])
@pytest.mark.parametrize("rule", ["Y", "3M", "15D", "30H", "15Min", "30S"])
def test_nearest_upsample_with_limit(tz_aware_fixture, freq, rule):
# GH 33939
rng = date_range("1/1/2000", periods=3, freq=freq, tz=tz_aware_fixture)
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample(rule).nearest(limit=2)
expected = ts.reindex(result.index, method="nearest", limit=2)
tm.assert_series_equal(result, expected)
def test_resample_ohlc(series):
s = series
grouper = Grouper(freq=Minute(5))
expect = s.groupby(grouper).agg(lambda x: x[-1])
result = s.resample("5Min").ohlc()
assert len(result) == len(expect)
assert len(result.columns) == 4
xs = result.iloc[-2]
assert xs["open"] == s[-6]
assert xs["high"] == s[-6:-1].max()
assert xs["low"] == s[-6:-1].min()
assert xs["close"] == s[-2]
xs = result.iloc[0]
assert xs["open"] == s[0]
assert xs["high"] == s[:5].max()
assert xs["low"] == s[:5].min()
assert xs["close"] == s[4]
def test_resample_ohlc_result():
# GH 12332
index = pd.date_range("1-1-2000", "2-15-2000", freq="h")
index = index.union(pd.date_range("4-15-2000", "5-15-2000", freq="h"))
s = Series(range(len(index)), index=index)
a = s.loc[:"4-15-2000"].resample("30T").ohlc()
assert isinstance(a, DataFrame)
b = s.loc[:"4-14-2000"].resample("30T").ohlc()
assert isinstance(b, DataFrame)
# GH12348
# raising on odd period
rng = date_range("2013-12-30", "2014-01-07")
index = rng.drop(
[
Timestamp("2014-01-01"),
Timestamp("2013-12-31"),
Timestamp("2014-01-04"),
Timestamp("2014-01-05"),
]
)
df = DataFrame(data=np.arange(len(index)), index=index)
result = df.resample("B").mean()
expected = df.reindex(index=date_range(rng[0], rng[-1], freq="B"))
tm.assert_frame_equal(result, expected)
def test_resample_ohlc_dataframe():
df = (
DataFrame(
{
"PRICE": {
Timestamp("2011-01-06 10:59:05", tz=None): 24990,
Timestamp("2011-01-06 12:43:33", tz=None): 25499,
Timestamp("2011-01-06 12:54:09", tz=None): 25499,
},
"VOLUME": {
Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
},
}
)
).reindex(["VOLUME", "PRICE"], axis=1)
res = df.resample("H").ohlc()
exp = pd.concat(
[df["VOLUME"].resample("H").ohlc(), df["PRICE"].resample("H").ohlc()],
axis=1,
keys=["VOLUME", "PRICE"],
)
tm.assert_frame_equal(exp, res)
df.columns = [["a", "b"], ["c", "d"]]
res = df.resample("H").ohlc()
exp.columns = pd.MultiIndex.from_tuples(
[
("a", "c", "open"),
("a", "c", "high"),
("a", "c", "low"),
("a", "c", "close"),
("b", "d", "open"),
("b", "d", "high"),
("b", "d", "low"),
("b", "d", "close"),
]
)
tm.assert_frame_equal(exp, res)
# dupe columns fail atm
# df.columns = ['PRICE', 'PRICE']
def test_resample_dup_index():
# GH 4812
# dup columns with resample raising
df = DataFrame(
np.random.randn(4, 12),
index=[2000, 2000, 2000, 2000],
columns=[Period(year=2000, month=i + 1, freq="M") for i in range(12)],
)
df.iloc[3, :] = np.nan
result = df.resample("Q", axis=1).mean()
expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
expected.columns = [Period(year=2000, quarter=i + 1, freq="Q") for i in range(4)]
tm.assert_frame_equal(result, expected)
def test_resample_reresample():
dti = date_range(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq="D")
s = Series(np.random.rand(len(dti)), dti)
bs = s.resample("B", closed="right", label="right").mean()
result = bs.resample("8H").mean()
assert len(result) == 22
assert isinstance(result.index.freq, offsets.DateOffset)
assert result.index.freq == offsets.Hour(8)
def test_resample_timestamp_to_period(simple_date_range_series):
ts = simple_date_range_series("1/1/1990", "1/1/2000")
result = ts.resample("A-DEC", kind="period").mean()
expected = ts.resample("A-DEC").mean()
expected.index = period_range("1990", "2000", freq="a-dec")
tm.assert_series_equal(result, expected)
result = ts.resample("A-JUN", kind="period").mean()
expected = ts.resample("A-JUN").mean()
expected.index = period_range("1990", "2000", freq="a-jun")
tm.assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
tm.assert_series_equal(result, expected)
result = ts.resample("M", kind="period").mean()
expected = ts.resample("M").mean()
expected.index = period_range("1990-01", "2000-01", freq="M")
tm.assert_series_equal(result, expected)
def test_ohlc_5min():
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
rng = date_range("1/1/2000 00:00:00", "1/1/2000 5:59:50", freq="10s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", closed="right", label="right").ohlc()
assert (resampled.loc["1/1/2000 00:00"] == ts[0]).all()
exp = _ohlc(ts[1:31])
assert (resampled.loc["1/1/2000 00:05"] == exp).all()
exp = _ohlc(ts["1/1/2000 5:55:01":])
assert (resampled.loc["1/1/2000 6:00:00"] == exp).all()
def test_downsample_non_unique():
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(5).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
result = ts.resample("M").mean()
expected = ts.groupby(lambda x: x.month).mean()
assert len(result) == 2
tm.assert_almost_equal(result[0], expected[1])
tm.assert_almost_equal(result[1], expected[2])
def test_asfreq_non_unique():
# GH #1077
rng = date_range("1/1/2000", "2/29/2000")
rng2 = rng.repeat(2).values
ts = Series(np.random.randn(len(rng2)), index=rng2)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
ts.asfreq("B")
def test_resample_axis1():
rng = date_range("1/1/2000", "2/29/2000")
df = DataFrame(np.random.randn(3, len(rng)), columns=rng, index=["a", "b", "c"])
result = df.resample("M", axis=1).mean()
expected = df.T.resample("M").mean().T
tm.assert_frame_equal(result, expected)
def test_resample_anchored_ticks():
# If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
# "anchor" the origin at midnight so we get regular intervals rather
# than starting from the first timestamp which might start in the
# middle of a desired interval
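    # e.g. ts[2:] below starts at 04:00:02, yet resampling it with "4h" should
    # produce the same midnight-anchored bins (04:00, 08:00, ...) as resampling
    # the full series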
rng = date_range("1/1/2000 04:00:00", periods=86400, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
freqs = ["t", "5t", "15t", "30t", "4h", "12h"]
for freq in freqs:
result = ts[2:].resample(freq, closed="left", label="left").mean()
expected = ts.resample(freq, closed="left", label="left").mean()
tm.assert_series_equal(result, expected)
def test_resample_single_group():
mysum = lambda x: x.sum()
rng = date_range("2000-1-1", "2000-2-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
tm.assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
rng = date_range("2000-1-1", "2000-1-10", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
tm.assert_series_equal(ts.resample("M").sum(), ts.resample("M").apply(mysum))
# GH 3849
s = Series(
[30.1, 31.6],
index=[Timestamp("20070915 15:30:00"), Timestamp("20070915 15:40:00")],
)
expected = Series([0.75], index=DatetimeIndex([Timestamp("20070915")], freq="D"))
result = s.resample("D").apply(lambda x: np.std(x))
tm.assert_series_equal(result, expected)
def test_resample_offset():
# GH 31809
rng = date_range("1/1/2000 00:00:00", "1/1/2000 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("5min", offset="2min").mean()
exp_rng = date_range("12/31/1999 23:57:00", "1/1/2000 01:57", freq="5min")
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_origin():
# GH 31809
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min")
resampled = ts.resample("5min", origin="1999-12-31 23:57:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
offset_timestamp = Timestamp(0) + Timedelta("2min")
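    # Timestamp(0) + 2min differs from the 23:57 anchor above by a whole number
    # of 5min intervals, so it produces the same bin edges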
resampled = ts.resample("5min", origin=offset_timestamp).mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
    # origin of '1999-12-31 12:02:00' should be equivalent for this case
resampled = ts.resample("5min", origin="1999-12-31 12:02:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", offset="-3m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
@pytest.mark.parametrize(
"origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()]
)
def test_resample_bad_origin(origin):
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
msg = (
"'origin' should be equal to 'epoch', 'start', 'start_day', "
"'end', 'end_day' or should be a Timestamp convertible type. Got "
f"'{origin}' instead."
)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin=origin)
@pytest.mark.parametrize("offset", ["invalid_value", "12dayys", "2000-30-30", object()])
def test_resample_bad_offset(offset):
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
msg = f"'offset' should be a Timedelta convertible type. Got '{offset}' instead."
with pytest.raises(ValueError, match=msg):
ts.resample("5min", offset=offset)
def test_resample_origin_prime_freq():
# GH 31809
start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
rng = pd.date_range(start, end, freq="7min")
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("2000-10-01 23:14:00", "2000-10-02 00:22:00", freq="17min")
resampled = ts.resample("17min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", origin="start_day").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:30:00", "2000-10-02 00:21:00", freq="17min")
resampled = ts.resample("17min", origin="start").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", offset="23h30min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("17min", origin="start_day", offset="23h30min").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:18:00", "2000-10-02 00:26:00", freq="17min")
resampled = ts.resample("17min", origin="epoch").mean()
tm.assert_index_equal(resampled.index, exp_rng)
exp_rng = date_range("2000-10-01 23:24:00", "2000-10-02 00:15:00", freq="17min")
resampled = ts.resample("17min", origin="2000-01-01").mean()
tm.assert_index_equal(resampled.index, exp_rng)
def test_resample_origin_with_tz():
# GH 31809
msg = "The origin must have the same timezone as the index."
tz = "Europe/Paris"
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s", tz=tz)
ts = Series(np.random.randn(len(rng)), index=rng)
exp_rng = date_range("1999-12-31 23:57:00", "2000-01-01 01:57", freq="5min", tz=tz)
resampled = ts.resample("5min", origin="1999-12-31 23:57:00+00:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
    # origin of '1999-12-31 12:02:00+03:00' should be equivalent for this case
resampled = ts.resample("5min", origin="1999-12-31 12:02:00+03:00").mean()
tm.assert_index_equal(resampled.index, exp_rng)
resampled = ts.resample("5min", origin="epoch", offset="2m").mean()
tm.assert_index_equal(resampled.index, exp_rng)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin="12/31/1999 23:57:00").mean()
# if the series is not tz aware, origin should not be tz aware
rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with pytest.raises(ValueError, match=msg):
ts.resample("5min", origin="12/31/1999 23:57:00+03:00").mean()
def test_resample_origin_epoch_with_tz_day_vs_24h():
# GH 34474
start, end = "2000-10-01 23:30:00+0500", "2000-12-02 00:30:00+0500"
rng = pd.date_range(start, end, freq="7min")
random_values = np.random.randn(len(rng))
ts_1 = Series(random_values, index=rng)
result_1 = ts_1.resample("D", origin="epoch").mean()
result_2 = ts_1.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1, result_2)
# check that we have the same behavior with epoch even if we are not timezone aware
ts_no_tz = ts_1.tz_localize(None)
result_3 = ts_no_tz.resample("D", origin="epoch").mean()
result_4 = ts_no_tz.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1, result_3.tz_localize(rng.tz), check_freq=False)
tm.assert_series_equal(result_1, result_4.tz_localize(rng.tz), check_freq=False)
    # check that we get similar results with two different timezones (+2H and +5H)
start, end = "2000-10-01 23:30:00+0200", "2000-12-02 00:30:00+0200"
rng = pd.date_range(start, end, freq="7min")
ts_2 = Series(random_values, index=rng)
result_5 = ts_2.resample("D", origin="epoch").mean()
result_6 = ts_2.resample("24H", origin="epoch").mean()
tm.assert_series_equal(result_1.tz_localize(None), result_5.tz_localize(None))
tm.assert_series_equal(result_1.tz_localize(None), result_6.tz_localize(None))
def test_resample_origin_with_day_freq_on_dst():
# GH 31809
tz = "America/Chicago"
def _create_series(values, timestamps, freq="D"):
return Series(
values,
index=DatetimeIndex(
[Timestamp(t, tz=tz) for t in timestamps], freq=freq, ambiguous=True
),
)
# test classical behavior of origin in a DST context
start = Timestamp("2013-11-02", tz=tz)
end = Timestamp("2013-11-03 23:59", tz=tz)
rng = pd.date_range(start, end, freq="1h")
ts = Series(np.ones(len(rng)), index=rng)
expected = _create_series([24.0, 25.0], ["2013-11-02", "2013-11-03"])
for origin in ["epoch", "start", "start_day", start, None]:
result = ts.resample("D", origin=origin).sum()
tm.assert_series_equal(result, expected)
# test complex behavior of origin/offset in a DST context
start = Timestamp("2013-11-03", tz=tz)
end = Timestamp("2013-11-03 23:59", tz=tz)
rng = pd.date_range(start, end, freq="1h")
ts = Series(np.ones(len(rng)), index=rng)
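    # 2013-11-03 has 25 wall-clock hours in America/Chicago (DST ends), so the
    # 25 hourly points are split across two daily bins depending on the offset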
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]
expected = _create_series([23.0, 2.0], expected_ts)
result = ts.resample("D", origin="start", offset="-2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 22:00-05:00", "2013-11-03 21:00-06:00"]
expected = _create_series([22.0, 3.0], expected_ts, freq="24H")
result = ts.resample("24H", origin="start", offset="-2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 02:00-05:00", "2013-11-03 02:00-06:00"]
expected = _create_series([3.0, 22.0], expected_ts)
result = ts.resample("D", origin="start", offset="2H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 23:00-05:00", "2013-11-03 23:00-06:00"]
expected = _create_series([24.0, 1.0], expected_ts)
result = ts.resample("D", origin="start", offset="-1H").sum()
tm.assert_series_equal(result, expected)
expected_ts = ["2013-11-02 01:00-05:00", "2013-11-03 01:00:00-0500"]
expected = _create_series([1.0, 24.0], expected_ts)
result = ts.resample("D", origin="start", offset="1H").sum()
tm.assert_series_equal(result, expected)
def test_resample_daily_anchored():
rng = date_range("1/1/2000 0:00:00", periods=10000, freq="T")
ts = Series(np.random.randn(len(rng)), index=rng)
ts[:2] = np.nan # so results are the same
result = ts[2:].resample("D", closed="left", label="left").mean()
expected = ts.resample("D", closed="left", label="left").mean()
tm.assert_series_equal(result, expected)
def test_resample_to_period_monthly_buglet():
# GH #1259
rng = date_range("1/1/2000", "12/31/2000")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("M", kind="period").mean()
exp_index = period_range("Jan-2000", "Dec-2000", freq="M")
tm.assert_index_equal(result.index, exp_index)
def test_period_with_agg():
# aggregate a period resampler with a lambda
s2 = Series(
np.random.randint(0, 5, 50),
index=pd.period_range("2012-01-01", freq="H", periods=50),
dtype="float64",
)
expected = s2.to_timestamp().resample("D").mean().to_period()
result = s2.resample("D").agg(lambda x: x.mean())
tm.assert_series_equal(result, expected)
def test_resample_segfault():
# GH 8573
# segfaulting in older versions
all_wins_and_wagers = [
(1, datetime(2013, 10, 1, 16, 20), 1, 0),
(2, datetime(2013, 10, 1, 16, 10), 1, 0),
(2, datetime(2013, 10, 1, 18, 15), 1, 0),
(2, datetime(2013, 10, 1, 16, 10, 31), 1, 0),
]
df = DataFrame.from_records(
all_wins_and_wagers, columns=("ID", "timestamp", "A", "B")
).set_index("timestamp")
result = df.groupby("ID").resample("5min").sum()
expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
tm.assert_frame_equal(result, expected)
def test_resample_dtype_preservation():
# GH 12202
# validation tests for dtype preservation
df = DataFrame(
{
"date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": Series([5, 6, 7, 8], dtype="int32"),
}
).set_index("date")
result = df.resample("1D").ffill()
assert result.val.dtype == np.int32
result = df.groupby("group").resample("1D").ffill()
assert result.val.dtype == np.int32
def test_resample_dtype_coercion():
pytest.importorskip("scipy.interpolate")
# GH 16361
df = {"a": [1, 3, 1, 4]}
df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))
expected = df.astype("float64").resample("H").mean()["a"].interpolate("cubic")
result = df.resample("H")["a"].mean().interpolate("cubic")
tm.assert_series_equal(result, expected)
result = df.resample("H").mean()["a"].interpolate("cubic")
tm.assert_series_equal(result, expected)
def test_weekly_resample_buglet():
# #1327
rng = date_range("1/1/2000", freq="B", periods=20)
ts = Series(np.random.randn(len(rng)), index=rng)
resampled = ts.resample("W").mean()
expected = ts.resample("W-SUN").mean()
tm.assert_series_equal(resampled, expected)
def test_monthly_resample_error():
# #1451
dates = date_range("4/16/2012 20:00", periods=5000, freq="h")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("M")
def test_nanosecond_resample_error():
    # GH 12307 - Values fall after last bin when
# Resampling using pd.tseries.offsets.Nano as period
start = 1443707890427
exp_start = 1443707890400
indx = pd.date_range(start=pd.to_datetime(start), periods=10, freq="100n")
ts = Series(range(len(indx)), index=indx)
r = ts.resample(pd.tseries.offsets.Nano(100))
result = r.agg("mean")
exp_indx = pd.date_range(start=pd.to_datetime(exp_start), periods=10, freq="100n")
exp = Series(range(len(exp_indx)), index=exp_indx)
tm.assert_series_equal(result, exp)
def test_resample_anchored_intraday(simple_date_range_series):
# #1471, #1458
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("M").mean()
expected = df.resample("M", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
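    # to_timestamp(how="end") stamps each month at its last nanosecond; adding
    # 1ns and subtracting a day moves the label to midnight of the month's last
    # day, matching the labels produced by resample("M")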
expected.index = expected.index._with_freq("infer")
assert expected.index.freq == "M"
tm.assert_frame_equal(result, expected)
result = df.resample("M", closed="left").mean()
exp = df.shift(1, freq="D").resample("M", kind="period").mean()
exp = exp.to_timestamp(how="end")
exp.index = exp.index + Timedelta(1, "ns") - Timedelta(1, "D")
exp.index = exp.index._with_freq("infer")
assert exp.index.freq == "M"
tm.assert_frame_equal(result, exp)
rng = date_range("1/1/2012", "4/1/2012", freq="100min")
df = DataFrame(rng.month, index=rng)
result = df.resample("Q").mean()
expected = df.resample("Q", kind="period").mean().to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index._data.freq = "Q"
expected.index._freq = lib.no_default
tm.assert_frame_equal(result, expected)
result = df.resample("Q", closed="left").mean()
expected = df.shift(1, freq="D").resample("Q", kind="period", closed="left").mean()
expected = expected.to_timestamp(how="end")
expected.index += Timedelta(1, "ns") - Timedelta(1, "D")
expected.index._data.freq = "Q"
expected.index._freq = lib.no_default
tm.assert_frame_equal(result, expected)
ts = simple_date_range_series("2012-04-29 23:00", "2012-04-30 5:00", freq="h")
resampled = ts.resample("M").mean()
assert len(resampled) == 1
def test_resample_anchored_monthstart(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "12/31/2002")
freqs = ["MS", "BMS", "QS-MAR", "AS-DEC", "AS-JUN"]
for freq in freqs:
ts.resample(freq).mean()
def test_resample_anchored_multiday():
# When resampling a range spanning multiple days, ensure that the
# start date gets used to determine the offset. Fixes issue where
# a one day period is not a multiple of the frequency.
#
# See: https://github.com/pandas-dev/pandas/issues/8683
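    # 2200L (2.2s) does not evenly divide a day, which is why the offset has to
    # be derived from the start of the data rather than from midnight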
index1 = pd.date_range("2014-10-14 23:06:23.206", periods=3, freq="400L")
index2 = pd.date_range("2014-10-15 23:00:00", periods=2, freq="2200L")
index = index1.union(index2)
s = Series(np.random.randn(5), index=index)
# Ensure left closing works
result = s.resample("2200L").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:02.000")
# Ensure right closing works
result = s.resample("2200L", label="right").mean()
assert result.index[-1] == Timestamp("2014-10-15 23:00:04.200")
def test_corner_cases(simple_period_range_series, simple_date_range_series):
# miscellaneous test coverage
rng = date_range("1/1/2000", periods=12, freq="t")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("5t", closed="right", label="left").mean()
ex_index = date_range("1999-12-31 23:55", periods=4, freq="5t")
tm.assert_index_equal(result.index, ex_index)
len0pts = simple_period_range_series("2007-01", "2010-05", freq="M")[:0]
# it works
result = len0pts.resample("A-DEC").mean()
assert len(result) == 0
# resample to periods
ts = simple_date_range_series("2000-04-28", "2000-04-30 11:00", freq="h")
result = ts.resample("M", kind="period").mean()
assert len(result) == 1
assert result.index[0] == Period("2000-04", freq="M")
def test_anchored_lowercase_buglet():
dates = date_range("4/16/2012 20:00", periods=50000, freq="s")
ts = Series(np.random.randn(len(dates)), index=dates)
# it works!
ts.resample("d").mean()
def test_upsample_apply_functions():
# #1596
rng = pd.date_range("2012-06-12", periods=4, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.resample("20min").aggregate(["mean", "sum"])
assert isinstance(result, DataFrame)
def test_resample_not_monotonic():
rng = pd.date_range("2012-06-12", periods=200, freq="h")
ts = Series(np.random.randn(len(rng)), index=rng)
ts = ts.take(np.random.permutation(len(ts)))
result = ts.resample("D").sum()
exp = ts.sort_index().resample("D").sum()
tm.assert_series_equal(result, exp)
def test_resample_median_bug_1688():
for dtype in ["int64", "int32", "float64", "float32"]:
df = DataFrame(
[1, 2],
index=[datetime(2012, 1, 1, 0, 0, 0), datetime(2012, 1, 1, 0, 5, 0)],
dtype=dtype,
)
result = df.resample("T").apply(lambda x: x.mean())
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
result = df.resample("T").median()
exp = df.asfreq("T")
tm.assert_frame_equal(result, exp)
def test_how_lambda_functions(simple_date_range_series):
ts = simple_date_range_series("1/1/2000", "4/1/2000")
result = ts.resample("M").apply(lambda x: x.mean())
exp = ts.resample("M").mean()
tm.assert_series_equal(result, exp)
foo_exp = ts.resample("M").mean()
foo_exp.name = "foo"
bar_exp = ts.resample("M").std()
bar_exp.name = "bar"
result = ts.resample("M").apply([lambda x: x.mean(), lambda x: x.std(ddof=1)])
result.columns = ["foo", "bar"]
tm.assert_series_equal(result["foo"], foo_exp)
tm.assert_series_equal(result["bar"], bar_exp)
# this is a MI Series, so comparing the names of the results
# doesn't make sense
result = ts.resample("M").aggregate(
{"foo": lambda x: x.mean(), "bar": lambda x: x.std(ddof=1)}
)
tm.assert_series_equal(result["foo"], foo_exp, check_names=False)
tm.assert_series_equal(result["bar"], bar_exp, check_names=False)
def test_resample_unequal_times():
# #1772
start = datetime(1999, 3, 1, 5)
# end hour is less than start
end = datetime(2012, 7, 31, 4)
bad_ind = date_range(start, end, freq="30min")
df = DataFrame({"close": 1}, index=bad_ind)
# it works!
df.resample("AS").sum()
def test_resample_consistency():
# GH 6418
# resample with bfill / limit / reindex consistency
i30 = pd.date_range("2002-02-02", periods=4, freq="30T")
s = Series(np.arange(4.0), index=i30)
s[2] = np.NaN
# Upsample by factor 3 with reindex() and resample() methods:
i10 = pd.date_range(i30[0], i30[-1], freq="10T")
s10 = s.reindex(index=i10, method="bfill")
s10_2 = s.reindex(index=i10, method="bfill", limit=2)
rl = s.reindex_like(s10, method="bfill", limit=2)
r10_2 = s.resample("10Min").bfill(limit=2)
r10 = s.resample("10Min").bfill()
# s10_2, r10, r10_2, rl should all be equal
tm.assert_series_equal(s10_2, r10)
tm.assert_series_equal(s10_2, r10_2)
tm.assert_series_equal(s10_2, rl)
def test_resample_timegrouper():
# GH 7227
dates1 = [
datetime(2014, 10, 1),
datetime(2014, 9, 3),
datetime(2014, 11, 5),
datetime(2014, 9, 5),
datetime(2014, 10, 8),
datetime(2014, 7, 15),
]
dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
dates3 = [pd.NaT] + dates1 + [pd.NaT]
for dates in [dates1, dates2, dates3]:
df = DataFrame({"A": dates, "B": np.arange(len(dates))})
result = df.set_index("A").resample("M").count()
exp_idx = DatetimeIndex(
["2014-07-31", "2014-08-31", "2014-09-30", "2014-10-31", "2014-11-30"],
freq="M",
name="A",
)
expected = DataFrame({"B": [1, 0, 2, 2, 1]}, index=exp_idx)
if df["A"].isna().any():
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
result = df.groupby(Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"A": dates, "B": np.arange(len(dates)), "C": np.arange(len(dates))}
)
result = df.set_index("A").resample("M").count()
expected = DataFrame(
{"B": [1, 0, 2, 2, 1], "C": [1, 0, 2, 2, 1]},
index=exp_idx,
columns=["B", "C"],
)
if df["A"].isna().any():
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
result = df.groupby(Grouper(freq="M", key="A")).count()
tm.assert_frame_equal(result, expected)
def test_resample_nunique():
# GH 12352
df = DataFrame(
{
"ID": {
Timestamp("2015-06-05 00:00:00"): "0010100903",
Timestamp("2015-06-08 00:00:00"): "0010150847",
},
"DATE": {
Timestamp("2015-06-05 00:00:00"): "2015-06-05",
Timestamp("2015-06-08 00:00:00"): "2015-06-08",
},
}
)
r = df.resample("D")
g = df.groupby(Grouper(freq="D"))
expected = df.groupby(Grouper(freq="D")).ID.apply(lambda x: x.nunique())
assert expected.name == "ID"
for t in [r, g]:
result = r.ID.nunique()
tm.assert_series_equal(result, expected)
result = df.ID.resample("D").nunique()
tm.assert_series_equal(result, expected)
result = df.ID.groupby(Grouper(freq="D")).nunique()
tm.assert_series_equal(result, expected)
def test_resample_nunique_preserves_column_level_names():
# see gh-23222
df = tm.makeTimeDataFrame(freq="1D").abs()
df.columns = pd.MultiIndex.from_arrays(
[df.columns.tolist()] * 2, names=["lev0", "lev1"]
)
result = df.resample("1h").nunique()
tm.assert_index_equal(df.columns, result.columns)
def test_resample_nunique_with_date_gap():
# GH 13453
index = pd.date_range("1-1-2000", "2-15-2000", freq="h")
index2 = pd.date_range("4-15-2000", "5-15-2000", freq="h")
index3 = index.append(index2)
s = Series(range(len(index3)), index=index3, dtype="int64")
r = s.resample("M")
# Since all elements are unique, these should all be the same
results = [r.count(), r.nunique(), r.agg(Series.nunique), r.agg("nunique")]
tm.assert_series_equal(results[0], results[1])
tm.assert_series_equal(results[0], results[2])
tm.assert_series_equal(results[0], results[3])
@pytest.mark.parametrize("n", [10000, 100000])
@pytest.mark.parametrize("k", [10, 100, 1000])
def test_resample_group_info(n, k):
# GH10914
# use a fixed seed to always have the same uniques
prng = np.random.RandomState(1234)
dr = date_range(start="2015-08-27", periods=n // 10, freq="T")
ts = Series(prng.randint(0, n // k, n).astype("int64"), index=prng.choice(dr, n))
left = ts.resample("30T").nunique()
ix = date_range(start=ts.index.min(), end=ts.index.max(), freq="30T")
vals = ts.values
bins = np.searchsorted(ix.values, ts.index, side="right")
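    # recompute nunique by hand: bins assigns every observation to a 30T
    # interval, the lexsort orders by (bin, value), and the mask keeps the first
    # occurrence of each value within a bin so that bincount counts distinct
    # values per bin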
sorter = np.lexsort((vals, bins))
vals, bins = vals[sorter], bins[sorter]
mask = np.r_[True, vals[1:] != vals[:-1]]
mask |= np.r_[True, bins[1:] != bins[:-1]]
arr = np.bincount(bins[mask] - 1, minlength=len(ix)).astype("int64", copy=False)
right = Series(arr, index=ix)
tm.assert_series_equal(left, right)
def test_resample_size():
n = 10000
dr = date_range("2015-09-19", periods=n, freq="T")
ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
left = ts.resample("7T").size()
ix = date_range(start=left.index.min(), end=ts.index.max(), freq="7T")
bins = np.searchsorted(ix.values, ts.index.values, side="right")
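    # searchsorted gives a 1-based bin index for each timestamp (slot 0 would be
    # times before the first edge), so slot 0 is dropped after bincount to align
    # the counts with ix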
val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype("int64", copy=False)
right = Series(val, index=ix)
tm.assert_series_equal(left, right)
def test_resample_across_dst():
# The test resamples a DatetimeIndex with values before and after a
# DST change
# Issue: 14682
# The DatetimeIndex we will start with
# (note that DST happens at 03:00+02:00 -> 02:00+01:00)
# 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
df1 = DataFrame([1477786980, 1477790580], columns=["ts"])
dti1 = DatetimeIndex(
pd.to_datetime(df1.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid")
)
# The expected DatetimeIndex after resampling.
# 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
df2 = DataFrame([1477785600, 1477789200], columns=["ts"])
dti2 = DatetimeIndex(
pd.to_datetime(df2.ts, unit="s")
.dt.tz_localize("UTC")
.dt.tz_convert("Europe/Madrid"),
freq="H",
)
df = DataFrame([5, 5], index=dti1)
result = df.resample(rule="H").sum()
expected = DataFrame([5, 5], index=dti2)
tm.assert_frame_equal(result, expected)
def test_groupby_with_dst_time_change():
# GH 24972
index = DatetimeIndex(
[1478064900001000000, 1480037118776792000], tz="UTC"
).tz_convert("America/Chicago")
df = DataFrame([1, 2], index=index)
result = df.groupby(Grouper(freq="1d")).last()
expected_index_values = pd.date_range(
"2016-11-02", "2016-11-24", freq="d", tz="America/Chicago"
)
index = DatetimeIndex(expected_index_values)
expected = DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
tm.assert_frame_equal(result, expected)
def test_resample_dst_anchor():
# 5172
dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz="US/Eastern")
df = DataFrame([5], index=dti)
dti = DatetimeIndex(df.index.normalize(), freq="D")
expected = DataFrame([5], index=dti)
tm.assert_frame_equal(df.resample(rule="D").sum(), expected)
df.resample(rule="MS").sum()
tm.assert_frame_equal(
df.resample(rule="MS").sum(),
DataFrame(
[5],
index=DatetimeIndex([datetime(2012, 11, 1)], tz="US/Eastern", freq="MS"),
),
)
dti = date_range("2013-09-30", "2013-11-02", freq="30Min", tz="Europe/Paris")
values = range(dti.size)
df = DataFrame({"a": values, "b": values, "c": values}, index=dti, dtype="int64")
how = {"a": "min", "b": "max", "c": "count"}
tm.assert_frame_equal(
df.resample("W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 384, 720, 1056, 1394],
"b": [47, 383, 719, 1055, 1393, 1586],
"c": [48, 336, 336, 336, 338, 193],
},
index=date_range("9/30/2013", "11/4/2013", freq="W-MON", tz="Europe/Paris"),
),
"W-MON Frequency",
)
tm.assert_frame_equal(
df.resample("2W-MON").agg(how)[["a", "b", "c"]],
DataFrame(
{
"a": [0, 48, 720, 1394],
"b": [47, 719, 1393, 1586],
"c": [48, 672, 674, 193],
},
index=date_range(
"9/30/2013", "11/11/2013", freq="2W-MON", tz="Europe/Paris"
),
),
"2W-MON Frequency",
)
tm.assert_frame_equal(
df.resample("MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 48, 1538], "b": [47, 1537, 1586], "c": [48, 1490, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="MS", tz="Europe/Paris"),
),
"MS Frequency",
)
tm.assert_frame_equal(
df.resample("2MS").agg(how)[["a", "b", "c"]],
DataFrame(
{"a": [0, 1538], "b": [1537, 1586], "c": [1538, 49]},
index=date_range("9/1/2013", "11/1/2013", freq="2MS", tz="Europe/Paris"),
),
"2MS Frequency",
)
df_daily = df["10/26/2013":"10/29/2013"]
tm.assert_frame_equal(
df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})[
["a", "b", "c"]
],
DataFrame(
{
"a": [1248, 1296, 1346, 1394],
"b": [1295, 1345, 1393, 1441],
"c": [48, 50, 48, 48],
},
index=date_range("10/26/2013", "10/29/2013", freq="D", tz="Europe/Paris"),
),
"D Frequency",
)
def test_downsample_across_dst():
# GH 8531
tz = pytz.timezone("Europe/Berlin")
dt = datetime(2014, 10, 26)
dates = date_range(tz.localize(dt), periods=4, freq="2H")
result = Series(5, index=dates).resample("H").mean()
expected = Series(
[5.0, np.nan] * 3 + [5.0],
index=date_range(tz.localize(dt), periods=7, freq="H"),
)
tm.assert_series_equal(result, expected)
def test_downsample_across_dst_weekly():
# GH 9119, GH 21459
df = DataFrame(
index=DatetimeIndex(
["2017-03-25", "2017-03-26", "2017-03-27", "2017-03-28", "2017-03-29"],
tz="Europe/Amsterdam",
),
data=[11, 12, 13, 14, 15],
)
result = df.resample("1W").sum()
expected = DataFrame(
[23, 42],
index=DatetimeIndex(
["2017-03-26", "2017-04-02"], tz="Europe/Amsterdam", freq="W"
),
)
tm.assert_frame_equal(result, expected)
idx = pd.date_range("2013-04-01", "2013-05-01", tz="Europe/London", freq="H")
s = Series(index=idx, dtype=np.float64)
result = s.resample("W").mean()
expected = Series(
index=pd.date_range("2013-04-07", freq="W", periods=5, tz="Europe/London"),
dtype=np.float64,
)
tm.assert_series_equal(result, expected)
def test_downsample_dst_at_midnight():
# GH 25758
start = datetime(2018, 11, 3, 12)
end = datetime(2018, 11, 5, 12)
index = pd.date_range(start, end, freq="1H")
index = index.tz_localize("UTC").tz_convert("America/Havana")
data = list(range(len(index)))
dataframe = DataFrame(data, index=index)
result = dataframe.groupby(Grouper(freq="1D")).mean()
dti = date_range("2018-11-03", periods=3).tz_localize(
"America/Havana", ambiguous=True
)
dti = DatetimeIndex(dti, freq="D")
expected = DataFrame([7.5, 28.0, 44.5], index=dti)
tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
# GH 13020
index = DatetimeIndex(
[
pd.NaT,
"1970-01-01 00:00:00",
pd.NaT,
"1970-01-01 00:00:01",
"1970-01-01 00:00:02",
]
)
frame = DataFrame([2, 3, 5, 7, 11], index=index)
index_1s = DatetimeIndex(
["1970-01-01 00:00:00", "1970-01-01 00:00:01", "1970-01-01 00:00:02"]
)
frame_1s = DataFrame([3, 7, 11], index=index_1s)
tm.assert_frame_equal(frame.resample("1s").mean(), frame_1s)
index_2s = DatetimeIndex(["1970-01-01 00:00:00", "1970-01-01 00:00:02"])
frame_2s = DataFrame([5, 11], index=index_2s)
tm.assert_frame_equal(frame.resample("2s").mean(), frame_2s)
index_3s = DatetimeIndex(["1970-01-01 00:00:00"])
frame_3s = DataFrame([7], index=index_3s)
tm.assert_frame_equal(frame.resample("3s").mean(), frame_3s)
tm.assert_frame_equal(frame.resample("60s").mean(), frame_3s)
def test_resample_datetime_values():
# GH 13119
# check that datetime dtype is preserved when NaT values are
# introduced by the resampling
dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
df = DataFrame({"timestamp": dates}, index=dates)
exp = Series(
[datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
index=date_range("2016-01-15", periods=3, freq="2D"),
name="timestamp",
)
res = df.resample("2D").first()["timestamp"]
tm.assert_series_equal(res, exp)
res = df["timestamp"].resample("2D").first()
tm.assert_series_equal(res, exp)
def test_resample_apply_with_additional_args(series):
# GH 14615
def f(data, add_arg):
return np.mean(data) * add_arg
multiplier = 10
result = series.resample("D").apply(f, multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing as kwarg
result = series.resample("D").apply(f, add_arg=multiplier)
expected = series.resample("D").mean().multiply(multiplier)
tm.assert_series_equal(result, expected)
# Testing dataframe
df = DataFrame({"A": 1, "B": 2}, index=pd.date_range("2017", periods=10))
result = df.groupby("A").resample("D").agg(f, multiplier)
expected = df.groupby("A").resample("D").mean().multiply(multiplier)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("k", [1, 2, 3])
@pytest.mark.parametrize(
"n1, freq1, n2, freq2",
[
(30, "S", 0.5, "Min"),
(60, "S", 1, "Min"),
(3600, "S", 1, "H"),
(60, "Min", 1, "H"),
(21600, "S", 0.25, "D"),
(86400, "S", 1, "D"),
(43200, "S", 0.5, "D"),
(1440, "Min", 1, "D"),
(12, "H", 0.5, "D"),
(24, "H", 1, "D"),
],
)
def test_resample_equivalent_offsets(n1, freq1, n2, freq2, k):
# GH 24127
n1_ = n1 * k
n2_ = n2 * k
s = Series(0, index=pd.date_range("19910905 13:00", "19911005 07:00", freq=freq1))
s = s + range(len(s))
result1 = s.resample(str(n1_) + freq1).mean()
result2 = s.resample(str(n2_) + freq2).mean()
tm.assert_series_equal(result1, result2)
@pytest.mark.parametrize(
"first,last,freq,exp_first,exp_last",
[
("19910905", "19920406", "D", "19910905", "19920407"),
("19910905 00:00", "19920406 06:00", "D", "19910905", "19920407"),
("19910905 06:00", "19920406 06:00", "H", "19910905 06:00", "19920406 07:00"),
("19910906", "19920406", "M", "19910831", "19920430"),
("19910831", "19920430", "M", "19910831", "19920531"),
("1991-08", "1992-04", "M", "19910831", "19920531"),
],
)
def test_get_timestamp_range_edges(first, last, freq, exp_first, exp_last):
first = Period(first)
first = first.to_timestamp(first.freq)
last = Period(last)
last = last.to_timestamp(last.freq)
exp_first = Timestamp(exp_first, freq=freq)
exp_last = Timestamp(exp_last, freq=freq)
freq = pd.tseries.frequencies.to_offset(freq)
result = _get_timestamp_range_edges(first, last, freq)
expected = (exp_first, exp_last)
assert result == expected
def test_resample_apply_product():
# GH 5586
index = date_range(start="2012-01-31", freq="M", periods=12)
ts = Series(range(12), index=index)
df = DataFrame({"A": ts, "B": ts + 2})
result = df.resample("Q").apply(np.product)
expected = DataFrame(
np.array([[0, 24], [60, 210], [336, 720], [990, 1716]], dtype=np.int64),
index=DatetimeIndex(
["2012-03-31", "2012-06-30", "2012-09-30", "2012-12-31"], freq="Q-DEC"
),
columns=["A", "B"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"first,last,freq_in,freq_out,exp_last",
[
(
"2020-03-28",
"2020-03-31",
"D",
"24H",
"2020-03-30 01:00",
), # includes transition into DST
(
"2020-03-28",
"2020-10-27",
"D",
"24H",
"2020-10-27 00:00",
), # includes transition into and out of DST
(
"2020-10-25",
"2020-10-27",
"D",
"24H",
"2020-10-26 23:00",
), # includes transition out of DST
(
"2020-03-28",
"2020-03-31",
"24H",
"D",
"2020-03-30 00:00",
), # same as above, but from 24H to D
("2020-03-28", "2020-10-27", "24H", "D", "2020-10-27 00:00"),
("2020-10-25", "2020-10-27", "24H", "D", "2020-10-26 00:00"),
],
)
def test_resample_calendar_day_with_dst(
first: str, last: str, freq_in: str, freq_out: str, exp_last: str
):
# GH 35219
ts = Series(1.0, pd.date_range(first, last, freq=freq_in, tz="Europe/Amsterdam"))
result = ts.resample(freq_out).pad()
expected = Series(
1.0, pd.date_range(first, exp_last, freq=freq_out, tz="Europe/Amsterdam")
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max", "first", "last"])
def test_resample_aggregate_functions_min_count(func):
# GH#37768
index = date_range(start="2020", freq="M", periods=3)
ser = Series([1, np.nan, np.nan], index)
result = getattr(ser.resample("Q"), func)(min_count=2)
expected = Series(
[np.nan],
index=DatetimeIndex(["2020-03-31"], dtype="datetime64[ns]", freq="Q-DEC"),
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
bolozna/EDENetworks | RawDataWindow.py | 1 | 37921 | """
EDENetworks, a genetic network analyzer
Copyright (C) 2011 Aalto University
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from __future__ import with_statement
# -*- coding: utf-8 -*-
from Tkinter import *
from netpython import *
import tkFileDialog,tkMessageBox
from pylab import *
import pylab
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2TkAgg
from PIL import Image
from PIL import ImageTk
import os
import shutil,math
import random
from EdenNetWindow import EdenNetWindow
from DataWindow import DataWindow
class RawDataWindow(Toplevel):
def __init__(self,parent,distancematrix,namestring=None,sizeX=800,sizeY=600,matrixtype='distance',distancemeasure='unknown',msdata=None,poplist=None,nodeTypes='individual'):
"""
nodeTypes : individual or population
"""
Toplevel.__init__(self,parent,bg='gray90') # creates the window itself
self.distancematrix=distancematrix
self.parent=parent
self.weighted=True
self.matrixtype=0 # meaning this is a distance matrix instead of a weight matrix (type=1)
self.coords=[]
self.namestring=""
self.msdata=msdata
self.poplist=poplist
self.nodeTypes=nodeTypes
self.distancemeasure=distancemeasure
if namestring:
self.title("Raw data: "+namestring)
self.namestring=namestring
mBar=Frame(self,relief='raised',borderwidth=2,bg='steelblue') # this will be the menu bar
lowerhalf=Frame(self,bg='gray90')
# --- calculate main statistics etc
N=len(self.distancematrix)
[minw,maxw,avgw]=netanalysis.weightStats(self.distancematrix)
self.minw=minw #saved for disabling log binning from menu bars
# --- menu bar ------------------------------
mBar=Frame(self,relief='raised',borderwidth=2,bg='steelblue')
FileBtn=self.makeFileMenu(mBar,self.distancematrix,namestring,self.parent)
AnalyzeBtn=self.makeAnalyzeMenu(mBar,self.distancematrix,namestring,self.parent,self.weighted)
DeriveBtn=self.makeDeriveMenu(mBar,self.distancematrix,namestring,self.parent,self.weighted)
if self.nodeTypes=="population":
PopBtn=self.makePopulationMenu(mBar)
if self.nodeTypes=="population":
mBar.tk_menuBar=(FileBtn,DeriveBtn,AnalyzeBtn,PopBtn)
else:
mBar.tk_menuBar=(FileBtn,DeriveBtn,AnalyzeBtn)
# --- lower left: plot panel ---------------
# generate plot frame
plotframe=Frame(lowerhalf,relief='raised',borderwidth=2,bg='gray80')
# first generate plot
temp=netanalysis.weight_distribution(self.distancematrix,'linbin',14)
datavector=[[temp[0],temp[1]]]
width=0.75*(temp[0][-1]-temp[0][0])/float(len(temp[0]))
myplot=visuals.ReturnPlotObject(datavector,'bar',"sample distances","d","P(d)",figsize=(3,2),fontsize=9,addstr=',width='+str(width)+',color=\"#9e0b0f\"')
myplot.canvas=FigureCanvasTkAgg(myplot.thisFigure,master=plotframe)
myplot.canvas.show()
myplot.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=YES, padx=10,pady=10)
plotframe.grid(row=0,column=0,sticky=NW)
# ----- lower right: info panel -------
infoframe=Frame(lowerhalf,relief='raised',borderwidth=2,bg='gray80')
lwidth=20 # sets width for the following labels
rowcounter=0
Label(infoframe,
text="Data:",
relief='flat',
bg='DarkOliveGreen2',
justify=RIGHT,width=lwidth).grid(row=rowcounter)
Label(infoframe,
text="%s" % namestring,
relief='flat',
bg='DarkOliveGreen3',
width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
Label(infoframe,text="Size:",relief='flat',bg='gray70',width=lwidth).grid(row=rowcounter)
sizeunit="samples"
if self.nodeTypes=="population":
sizeunit="populations"
Label(infoframe,text="N=%d " % N + sizeunit,relief='flat',bg='gray60',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
if hasattr(self.distancematrix,'N_origsamples'):
Label(infoframe,text="Based on:",relief='flat',bg='gray70',width=lwidth).grid(row=rowcounter)
Label(infoframe,text="Ns=%d samples" % self.distancematrix.N_origsamples,relief='flat',bg='gray60',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
if not(self.nodeTypes=="population"):
# label informing about how clones have been handled
Label(infoframe,text="Clones:",relief='flat',bg='gray70',width=lwidth).grid(row=rowcounter)
if hasattr(self.distancematrix,'clones'):
if self.distancematrix.clones=='collapsed':
if hasattr(self.distancematrix,'Nclones'):
clonetext='removed %d samples' % self.distancematrix.Nclones
else:
clonetext='removed'
elif self.distancematrix.clones=='included':
clonetext='kept'
else:
clonetext='unknown'
else:
clonetext='unknown'
Label(infoframe,text="%s" % clonetext,relief='flat',bg='gray60',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
# label about distance measure
if hasattr(self.distancematrix,'distancemeasure'):
distancetext=self.distancematrix.distancemeasure
else:
distancetext='unknown'
Label(infoframe,text="Distance measure:",relief='flat',bg='gray65',width=lwidth).grid(row=rowcounter)
Label(infoframe,text="%s" % distancetext,relief='flat',bg='gray55',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
Label(infoframe,text="Average distance:",relief='flat',bg='gray65',width=lwidth).grid(row=rowcounter)
Label(infoframe,text="<d>=%3.2f" % avgw,relief='flat',bg='gray55',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
Label(infoframe,text="Min distance:",relief='flat',bg='gray65',width=lwidth).grid(row=rowcounter)
Label(infoframe,text="dmin=%3.2f" % minw,relief='flat',bg='gray55',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
Label(infoframe,text="Max distance:",relief='flat',bg='gray65',width=lwidth).grid(row=rowcounter)
Label(infoframe,text="dmax=%3.2f" % maxw,relief='flat',bg='gray55',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
if hasattr(self.distancematrix,'nodeProperty'):
Label(infoframe,text="Imported attributes", relief='flat',bg='DarkOliveGreen2',width=lwidth).grid(row=rowcounter)
Label(infoframe,text="type",relief='flat',bg='DarkOliveGreen3',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
propertydict=netext.getPropertyTypes(self.distancematrix)
for prop in propertydict.keys():
Label(infoframe,text=prop,relief='flat',bg='gray70',width=lwidth).grid(row=rowcounter)
Label(infoframe,text=propertydict[prop],relief='flat',bg='gray65',width=lwidth).grid(row=rowcounter,column=1)
rowcounter=rowcounter+1
# -- SHOW IT ALL ---
mBar.pack(side=TOP,fill=X,expand=YES,anchor=NW)
infoframe.grid(row=0,column=1,sticky=NW)
lowerhalf.pack(side=TOP,fill=BOTH,expand=YES)
# ------- generators for menus ----------
def makeFileMenu(self,mBar,network,namestring,root):
FileBtn=Menubutton(mBar,text="File",underline=0,bg='steelblue')
FileBtn.pack(side=LEFT,padx="2m")
FileBtn.menu=Menu(FileBtn)
FileBtn.menu.save=Menu(FileBtn.menu)
FileBtn.menu.save.add_command(label="Square matrix",underline=0,command=lambda s=self,type="square":s.save_network(type))
FileBtn.menu.save.add_command(label="Upper triangular",underline=0,command=lambda s=self,type="upperdiag":s.save_network(type))
FileBtn.menu.save.add_command(label="Strictly upper triangluar",underline=0,command=lambda s=self,type="supperdiag":s.save_network(type))
FileBtn.menu.save.add_command(label="Lower triangular",underline=0,command=lambda s=self,type="lowerdiag":s.save_network(type))
FileBtn.menu.save.add_command(label="Strictly lower triangluar",underline=0,command=lambda s=self,type="slowerdiag":s.save_network(type))
FileBtn.menu.add_cascade(label='Save distance matrix',menu=FileBtn.menu.save)
FileBtn.menu.add('separator')
FileBtn.menu.add_command(label="Close",underline=0,command=self.exit_network)
FileBtn['menu']=FileBtn.menu
return FileBtn
def makeDeriveMenu(self,mBar,network,namestring,root,weighted=FALSE):
DeriveBtn=Menubutton(mBar,text="Derive",underline=0,bg='steelblue')
DeriveBtn.pack(side=LEFT,padx="2m")
DeriveBtn.menu=Menu(DeriveBtn)
DeriveBtn.menu.add_command(label='Minimum spanning tree',command=lambda s=self,net=self.distancematrix,n=namestring,r=root: s.generate_mst(net,n,r,False))
DeriveBtn.menu.add_command(label='Manual thresholding',command=lambda s=self,n=namestring,r=root: s.percolation(r,namestring=n,method='weight',reverse=False))
DeriveBtn.menu.add_command(label='Automatic thresholding',command=self.autothreshold)
DeriveBtn["menu"]=DeriveBtn.menu
def makeAnalyzeMenu(self,mBar,network,namestring,root,weighted=FALSE):
AnalyzeBtn=Menubutton(mBar,text="Analyze",underline=0,bg='steelblue')
AnalyzeBtn.pack(side=LEFT,padx="2m")
AnalyzeBtn.menu=Menu(AnalyzeBtn)
AnalyzeBtn.menu.weights=Menu(AnalyzeBtn.menu)
AnalyzeBtn.menu.weights.add_command(label='Linear bins',command=lambda s=self,net=self.distancematrix,n=namestring,r=root: s.weights_bin(net,n,r,'lin'))
AnalyzeBtn.menu.weights.add_command(label='Cumulative',command=lambda s=self,net=self.distancematrix,n=namestring,r=root: s.weights_cumulative(net,n,r))
AnalyzeBtn.menu.weights.add_command(label='Logarithmic bins',command=lambda s=self,net=self.distancematrix,n=namestring,r=root: s.weights_bin(net,n,r,'log'))
#Disable logarithmic bins if data has zero distances
if self.minw==0:
AnalyzeBtn.menu.weights.entryconfig(3, state=DISABLED)
AnalyzeBtn.menu.add_cascade(label='Distance distribution',menu=AnalyzeBtn.menu.weights)
AnalyzeBtn['menu']=AnalyzeBtn.menu
return AnalyzeBtn
def makePopulationMenu(self,mBar):
PopBtn=Menubutton(mBar,text="Randomizations",underline=0,bg='steelblue')
PopBtn.pack(side=LEFT,padx="2m")
PopBtn.menu=Menu(PopBtn)
PopBtn.menu.stats=Menu(PopBtn.menu)
PopBtn.menu.single=Menu(PopBtn.menu)
PopBtn.menu.single.add_command(label='Bootstrapping',command=lambda: self.singleBootstrapping())
PopBtn.menu.stats.add_command(label='Betweenness: Bootstrapping',command=lambda: self.bootstrapping())
PopBtn.menu.single.add_command(label='Shuffle samples',command=lambda: self.singleShuffledNodes())
PopBtn.menu.stats.add_command(label='Clustering: Shuffle samples',command=lambda: self.statsShuffled("nodes"))
PopBtn.menu.single.add_command(label='Shuffle alleles',command=lambda: self.singleShuffledAlleles())
PopBtn.menu.stats.add_command(label='Clustering: Shuffle alleles',command=lambda: self.statsShuffled("alleles"))
PopBtn.menu.add_cascade(label="Single realizations",menu=PopBtn.menu.single)
PopBtn.menu.add_cascade(label="Statistics",menu=PopBtn.menu.stats)
PopBtn['menu']=PopBtn.menu
return PopBtn
# DEFINE ANALYSIS COMMANDS
def bootstrapping(self):
bsdialog=dialogues.BootstrapPopulationsDialog(self)
bsP,rounds=bsdialog.result
bsP,rounds=float(bsP),int(rounds)
bsWaiter=dialogues.WaitWindow(self,title="Processsing...",titlemsg="Calculating statistics...",hasProgressbar=True)
bsFunction=lambda:bootstrap(self.msdata,self.poplist,self.distancematrix,bsP,rounds,bsWaiter.progressbar.set)
bsWaiter.go(bsFunction)
nodeBc,nodeBcRank,nodeDegree,nodeDegreeRank=bsWaiter.result
BootstrapResultsWindow(self.parent,nodeBc,nodeBcRank,"betweenness centrality","BC",namestring=self.namestring,bsP=bsP)
def singleBootstrapping(self):
bsdialog=dialogues.SliderDialog2(self,0.0,1.0,0.01,0.5,bodyText="Percentage of nodes in each location?")
bsP=bsdialog.result
bsNet,bsMsdata,bsPoplist=bootstrap_single(self.msdata,self.poplist,self.distancematrix,bsP)
bsNet.matrixtype=0
RawDataWindow(self.parent,bsNet,namestring=self.namestring+"_bootstrap_P="+str(bsP),sizeX=800,sizeY=600,matrixtype=self.matrixtype,distancemeasure=self.distancemeasure,msdata=bsMsdata,poplist=bsPoplist,nodeTypes=self.nodeTypes)
def autothreshold(self):
newnet,threshold=autoThreshold(self.distancematrix,outputThreshold=True)
netext.copyNodeProperties(self.distancematrix,newnet)
titlestring=self.namestring+" thresholded at %2.2f" % threshold
self.makeMstAndCoords()
EdenNetWindow(self.parent,newnet,namestring=titlestring,nettype=self.matrixtype,parentnet=self.distancematrix,coords=self.coords,mstnet=self.mstnet)
def singleShuffledNodes(self):
newmsdata=self.msdata.copy() #make a copy of the data
newmsdata.shuffleNodes()
goldstein_list,unique_poplist=eden.getGoldsteinLists(self.poplist)
newdm=newmsdata.getGroupwiseDistanceMatrix(goldstein_list,self.distancematrix.distancemeasure,groupNames=unique_poplist)
newdm.distancemeasure=self.distancematrix.distancemeasure
newdm.matrixtype=0
RawDataWindow(self.parent,newdm,namestring=self.namestring+"_shuffledSamples",sizeX=800,sizeY=600,matrixtype=self.matrixtype,distancemeasure=self.distancemeasure,msdata=newmsdata,poplist=self.poplist,nodeTypes=self.nodeTypes)
def singleShuffledAlleles(self):
newmsdata=self.msdata.copy() #make a copy of the data
newmsdata.randomize()
goldstein_list,unique_poplist=eden.getGoldsteinLists(self.poplist)
newdm=newmsdata.getGroupwiseDistanceMatrix(goldstein_list,self.distancematrix.distancemeasure,groupNames=unique_poplist)
newdm.distancemeasure=self.distancematrix.distancemeasure
newdm.matrixtype=0
RawDataWindow(self.parent,newdm,namestring=self.namestring+"_shuffledAlleles",sizeX=800,sizeY=600,matrixtype=self.matrixtype,distancemeasure=self.distancemeasure,msdata=newmsdata,poplist=self.poplist,nodeTypes=self.nodeTypes)
def statsShuffled(self,shuffling):
#first autothreshold the original network
thNet,autoTh=autoThreshold(self.distancematrix,outputThreshold=True)
repDialog=dialogues.AskNumberDialog(self,title="Provide a treshold leveel",bodyText="Threshold distance:",initNumber=autoTh)
try:
th=float(repDialog.result)
except Exception:
tkMessageBox.showerror(
"Error",
"Please provide a number."
)
return
thNet=transforms.threshold_by_value(self.distancematrix,th,accept="<=",keepIsolatedNodes=True)
thEdges=len(thNet.edges)
oClustering=netanalysis.globalClustering(thNet)
#ask how many repetitions
repDialog=dialogues.AskNumberDialog(self,title="Provide number of repetitions",bodyText="Number of repetitions:")
try:
reps=int(repDialog.result)
except Exception:
tkMessageBox.showerror(
"Error",
"Please provide a number."
)
return
#show progressbar
waiter=dialogues.WaitWindow(self,title="Processing...",titlemsg="Calculating statistics...",hasProgressbar=True)
#calculate stats
clustering=[]
for round in range(reps):
newmsdata=self.msdata.copy() #make a copy of the data
if shuffling=="nodes":
newmsdata.shuffleNodes()
elif shuffling=="alleles":
newmsdata.randomize()
else:
raise Exception("No such shuffling method.")
goldstein_list,unique_poplist=eden.getGoldsteinLists(self.poplist)
newdm=newmsdata.getGroupwiseDistanceMatrix(goldstein_list,self.distancematrix.distancemeasure,groupNames=unique_poplist)
newdm.distancemeasure=self.distancematrix.distancemeasure
newEdges=list(newdm.edges)
newEdges.sort(key=lambda x:x[2])
newThNet=pynet.SymmNet()
for i in range(thEdges):
edge=newEdges[i]
newThNet[edge[0],edge[1]]=edge[2]
clustering.append(netanalysis.globalClustering(newThNet))
waiter.progressbar.set(float(round)/float(reps))
#terminate the progressbar window
waiter.ok()
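        # One-sided empirical p-value: the fraction of shuffled realizations whose
        # global clustering is at least as large as the observed value.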
p=float(len(filter(lambda x:x>=oClustering,clustering)))/float(len(clustering))
try: #for numpy <1.3 ?
temp=pylab.histogram(clustering,bins=min(reps,30),new=True)
except TypeError:
temp=pylab.histogram(clustering,bins=min(reps,30))
temp=list(temp)
temp[1]=temp[1][:len(temp[1])-1]
width=temp[1][1]-temp[1][0]
t=DataWindow(self,[[temp[1],temp[0]]],self.namestring+"_shuffle_"+shuffling+"_"+str(reps),'bar','Clustering (original=%.2f,p=%g)' %(oClustering,p),'<c>','P(<c>)',addstring=",width="+str(width))
def weights_cumulative(self,network,namestring,parent,maxPoints=1000):
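        # Plots the empirical cumulative distribution of pairwise distances;
        # to keep the plot light, at most maxPoints evenly spaced points of the
        # sorted distance list are drawn.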
nDists=len(network.weights)
dists=pylab.zeros(nDists)
for i,w in enumerate(network.weights):
dists[i]=w
dists.sort()
x=pylab.array(pylab.linspace(0,nDists,min(nDists,maxPoints)),dtype='uint')
x[-1]=x[-1]-1
dists=dists[x]
x=pylab.array(x,dtype='float')/nDists
DataWindow(self,[[dists,x]],namestring,'plot','Cumulative distance distribution','d','P(<d)')
def weights_bin(self,network,namestring,parent,linorlog='log'):
'''Calculates and plots weight distribution'''
choices=dialogues.AskNumberOfBins(parent,'Distance distribution: options')
if choices.result!=None:
if linorlog=='lin':
plotstyle='bar'
temp=netanalysis.weight_distribution(network,'linbin',choices.result)
width=0.75*(temp[0][-1]-temp[0][0])/float(len(temp[0]))
addstring=',width='+str(width)+',color=\"#9e0b0f\"'
else:
plotstyle='loglog'
temp=netanalysis.weight_distribution(network,'logbin',choices.result)
addstring=''
t=DataWindow(parent,[[temp[0],temp[1]]],namestring,plotstyle,'Distance distribution','d','P(d)',addstring=addstring)
def percolation(self,parent,namestring='',method='fraction',reverse=False):
edges=list(self.distancematrix.edges)
random.shuffle(edges)
Nedges=len(edges)
Nnodes=len(self.distancematrix)
edges.sort(lambda x,y:cmp(x[2],y[2]),reverse=reverse)
ee=percolator.EvaluationList(edges)
if method=='weight':
ee.setStrengthEvaluations()
else:
ee.setLinearEvaluations(0,len(edges),100)
data=[]
thresholds=[]
threshfract=[]
gcs=[]
clusterings=[]
susc=[]
suscmax=0.0
suscmax_thresh=0.0
suscmax_fract=0.0
counter=0
gcctitle=''
for cs in percolator.Percolator(ee):
thresholds.append(cs.threshold)
threshfract.append(cs.addedEdges/float(Nedges))
gcs.append(cs.getGiantSize())
s=cs.getSusceptibility(Nnodes)
if (s>suscmax):
suscmax=s
suscmax_thresh=cs.threshold
suscmax_fract=cs.addedEdges/float(Nedges)
susc.append(s)
if method=='weight':
data.append([thresholds,gcs])
data.append([thresholds,susc])
tstring='w'
susctitle='S peaks at f=%2.2f' % float(suscmax_thresh)
if not(reverse):
gcctitle='Links with w<threshold added'
else:
gcctitle='Links with w>threshold added'
else:
data.append([threshfract,gcs])
data.append([threshfract,susc])
tstring='%'
susctitle='S peaks at f=%2.2f, w=%2.2f' % (float(suscmax_fract),float(suscmax_thresh))
if not(reverse):
gcctitle='% weakest links added'
else:
gcctitle='% strongest links added'
if len(self.coords)==0:
mstnet=transforms.mst_kruskal(self.distancematrix,randomize=TRUE,maximum=FALSE)
mstnet.matrixtype=0
h=visuals.Himmeli(mstnet,treeMode=True)
c=h.getCoordinates()
self.coords=c
self.mstnet=mstnet
#Use autoThreshold results instead of max susc values
newnet,suscmax_thresh=autoThreshold(self.distancematrix,outputThreshold=True)
#To avoid rounding errors, not a very nice thing to do :(
suscmax_thresh+=suscmax_thresh*0.00000001
temp=dialogues.PercolationDialog(parent,title="Choose threshold distance:",titlemsg="Set threshold distance",pdata=data,suscmax_thresh=suscmax_thresh)
if temp.result!=None:
threshold=float(temp.result)
newnet=transforms.threshold_by_value(self.distancematrix,threshold,accept="<=",keepIsolatedNodes=True)
newnet.matrixtype=0
titlestring=self.namestring+" thresholded at %2.2f" % threshold
if len(newnet.edges)==0:
tkMessageBox.showerror("Error while thresholding","Threshodling lead to an empty network.\nIncrease the threshold level.")
else:
t=EdenNetWindow(parent,newnet,titlestring,coords=self.coords,nettype=self.matrixtype,mstnet=self.mstnet,parentnet=self.distancematrix)
def makeMstAndCoords(self):
if len(self.coords)==0:
mstnet=transforms.mst_kruskal(self.distancematrix,randomize=True,maximum=False)
mstnet.matrixtype=0
h=visuals.Himmeli(mstnet,treeMode=True)
c=h.getCoordinates()
self.coords=c
self.mstnet=mstnet
def generate_mst(self,network,namestring,parent,max_spanningtree):
if max_spanningtree:
titlestring='Max_spanning_tree_%s' % namestring
else:
titlestring='Min_spanning_tree_%s' % namestring
newnet=transforms.mst_kruskal(network,randomize=True,maximum=max_spanningtree)
newnet.matrixtype=0
h=visuals.Himmeli(newnet,treeMode=True)
c=h.getCoordinates()
filename=h.netName+"_0001.eps"
if os.path.isfile(filename):
os.remove(filename)
t=EdenNetWindow(parent,newnet,titlestring,nettype=self.matrixtype,parentnet=network,coords=c,mstnet=newnet)
def threshold_manual(self,network,namestring,parent,mode):
th=dialogues.AskThreshold(parent,'Choose threshold:')
threshold=th.result
if mode:
titlestring='%s_edges_above_%3.2f' % (namestring,threshold)
else:
titlestring='%s_edges_below_%3.2f' % (namestring,threshold)
newnet=transforms.threshold_by_value(network,threshold,mode)
newnet.matrixtype=0
if len(self.coords)==0:
# first time thresholding; generate MST visualization coords
if self.matrixtype==1:
maximum=TRUE
else:
maximum=FALSE
mstnet=transforms.mst_kruskal(network,randomize=TRUE,maximum=maximum)
h=visuals.Himmeli(mstnet)
c=h.getCoordinates()
self.coords=c
self.mstnet=mstnet
t=NetWindow(parent,newnet,titlestring,coords=self.coords,nettype=self.matrixtype,mstnet=self.mstnet)
def dist_to_weight(self,network,namestring,parent):
matrixfilename='%s_DtoW' % namestring
m=transforms.dist_to_weights(network)
t=MatrixWindow(parent,m,matrixfilename,matrixtype=1)
def open_network(self):
pass
def load_aux_data(self):
aux_filename=tkFileDialog.askopenfilename(filetypes=[("Text files",".txt"),
("Ascii files",".asc")],
title="Choose auxiliary data file")
if len(aux_filename)==0:
return
else:
try:
netio.loadNodeProperties(self.distancematrix,aux_filename)
except Exception,e:
tkMessageBox.showerror(
"Error importing auxiliary data:",
"File in wrong format:\n %s" % str(e)
)
#call for method updating the property labels in the window
def save_network(self,type="square"):
network=self.distancematrix
netfilename=tkFileDialog.asksaveasfilename(title="Select file for the distance matrix")
if len(netfilename)==0:
return
netfile=open(netfilename,"w")
nodes=netio.writeNet_mat(network,netfile,type=type)
nodefilename=tkFileDialog.asksaveasfilename(title="Select file for node names")
if len(nodefilename)>0:
nodefile=open(nodefilename,"w")
for node in nodes:
nodefile.write(str(node)+"\n")
            tkMessageBox.showinfo(message='Save successful.')
else:
            tkMessageBox.showinfo(message='Save successful.\nNode names not saved.')
def exit_network(self):
self.destroy()
def setAxes(self,a,c,xstring,ystring):
setp(a,'xscale',xstring,'yscale',ystring)
c.show()
def close_window(self):
self.destroy()
class BootstrapResultsWindow(Toplevel):
def __init__(self,parent,nodeM,nodeMRank,measureName,measureShorthand,namestring=None,sizeX=8000,sizeY=8000,bsP=None):
Toplevel.__init__(self,parent,bg='gray90') # creates the window itself
self.parent=parent
if namestring!=None:
self.title("Bootstrapping results for "+measureName+", "+namestring+" with "+str(100*bsP)+"% of samples kept")
self.namestring=namestring
else:
self.namestring=""
mBar=Frame(self,relief='raised',borderwidth=2,bg='steelblue') # this will be the menu bar
lowerhalf=Frame(self,bg='gray90')
# --- menu bar ------------------------------
mBar=Frame(self,relief='raised',borderwidth=2,bg='steelblue')
FileBtn=self.makeFileMenu(mBar,self.parent)
# --- left: bc histograms ---------------
# generate plot frame
plotframe=Frame(lowerhalf,relief='raised',borderwidth=2,bg='gray80',width=2000, height=2000)
#generate the figure
fig=self.getMFigure(nodeM,measureName,measureShorthand)
self.figure_left=fig
#Show the figure
canvas=FigureCanvasTkAgg(fig,master=plotframe)
canvas.show()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=YES, padx=10,pady=10)
plotframe.grid(row=0,column=0,sticky=NW)
# --- right: bc rank histograms ---------------
# generate plot frame
rightFrame=Frame(lowerhalf,relief='raised',borderwidth=2,bg='gray80',width=2000, height=2000)
plotframe=Frame(rightFrame,relief='raised',borderwidth=2,bg='gray80',width=2000, height=2000)
fig=self.getTopRankedFigure(nodeMRank,measureShorthand)
canvas=FigureCanvasTkAgg(fig,master=plotframe)
canvas.show()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=YES, padx=10,pady=10)
plotframe.grid(row=0,column=0,sticky=NW)
plotframe=Frame(rightFrame,relief='raised',borderwidth=2,bg='gray80',width=2000, height=2000)
self.figure_upper_right=fig
fig=self.getTopRankedFigure(nodeMRank,measureShorthand,nTop=1)
canvas=FigureCanvasTkAgg(fig,master=plotframe)
canvas.show()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=YES, padx=10,pady=10)
plotframe.grid(row=1,column=0,sticky=NW)
self.figure_lower_right=fig
rightFrame.grid(row=0,column=1,sticky=NW)
#rightFrame.pack(side=TOP,fill=BOTH,expand=YES)
# -- SHOW IT ALL ---
mBar.pack(side=TOP,fill=X,expand=YES,anchor=NW)
lowerhalf.pack(side=TOP,fill=BOTH,expand=YES)
def getMFigure(self,nodeBc,measureName,measureShorthand):
#sort bc by mean
names=nodeBc.keys()
meanNodeBc={}
for name in names:
meanNodeBc[name]=pylab.mean(nodeBc[name])
names.sort(key=lambda x:meanNodeBc[x],reverse=True)
data=[]
for name in names:
data.append(nodeBc[name])
#top 5 distributions
nTop=5
fig=pylab.Figure(figsize=(5,8), dpi=80)
fig.subplots_adjust(bottom=0.08,right=0.95,top=0.95)
for nodeIndex in range(nTop):
axes=fig.add_subplot(nTop,1,nodeIndex+1)
axes.hist(data[nodeIndex],100)
axes.set_ylabel(names[nodeIndex],fontsize=8)
for tick in axes.get_yticklabels():
tick.set_fontsize(10)
tick.set_fontname("Times")
if nodeIndex==0:
axes.set_title("Distribution of "+measureShorthand+"s for top "+str(nTop)+" locations")
axes.set_xlabel(measureName)
return fig
def getTopRankedFigure(self,nodeBcRank,measureShorthand,nTop=5,plotAtMost=10):
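        # Counts, for each node, how many bootstrap rounds placed it among the
        # nTop best ranks, and bar-plots the counts for at most plotAtMost nodes.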
tops={}
for node in nodeBcRank.iterkeys():
for rank in nodeBcRank[node]:
if rank<nTop:
tops[node]=tops.get(node,0)+1
names=tops.keys()
names.sort(key=lambda x:tops[x],reverse=True)
data=[]
for name in names:
data.append(tops[name])
fig=pylab.Figure(figsize=(5,3.8),dpi=80)
fig.subplots_adjust(bottom=0.3)
axes=fig.add_subplot(111)
nums=range(min(len(data),plotAtMost))
axes.bar(nums,data[:plotAtMost])
axes.set_xticklabels(names,rotation=45,fontsize=8)
axes.set_xticks(map(lambda x:x+0.5,nums))
axes.set_title("# of times at top "+str(nTop))
return fig
def makeFileMenu(self,mBar,root):
FileBtn=Menubutton(mBar,text="File",underline=0,bg='steelblue')
FileBtn.pack(side=LEFT,padx="2m")
FileBtn.menu=Menu(FileBtn)
FileBtn.menu.add_command(label="Save figure on the left...",underline=0,command=self.save_left)
FileBtn.menu.add_command(label="Save figure on the upper right...",underline=0,command=self.save_upper_right)
FileBtn.menu.add_command(label="Save figure on the lower right...",underline=0,command=self.save_lower_right)
FileBtn.menu.add_command(label="Close",underline=0,command=self.destroy)
FileBtn['menu']=FileBtn.menu
return FileBtn
def save_left(self):
self.save_figure("left.png",self.figure_left)
def save_upper_right(self):
self.save_figure("upper_right.png",self.figure_upper_right)
def save_lower_right(self):
self.save_figure("lower_right.png",self.figure_lower_right)
def save_figure(self,namestring,thefigure):
filetypes=[("PNG file","*.png"),("PDF file","*.pdf"),('EPS file','*.eps'),("SVG file","*.svg")]
types=["png","pdf","eps","svg"]
newname=tkFileDialog.asksaveasfilename(initialfile=namestring,title='Save as',filetypes=filetypes)
if len(newname)>0: #if user did not press cancel button
ending=newname.split(".")[-1]
if ending in types:
thefigure.savefig(newname)
else:
tkMessageBox.showerror(
"Error saving the figure",
"Unknown file format. Use png, pdf, eps or svg."
)
def bootstrap(msdata,poplist,distancematrix,bsP,rounds,progressUpdater):
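    # Bootstrap procedure (as implemented below): in each round a fraction bsP of
    # the samples is drawn from every population, the groupwise distance matrix is
    # rebuilt and auto-thresholded, and betweenness/degree values and ranks are
    # recorded per node. progressUpdater is called with the completed fraction
    # after each round.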
    #find the number of edges kept by auto-thresholding the original
    #distance matrix (reused below for the degree statistics)
othrNet=autoThreshold(distancematrix)
originalThreshold=len(othrNet.edges)
#transfer poplist to another format:
pops={}
for i,p in enumerate(poplist):
if p not in pops:
pops[p]=[]
pops[p].append(i)
#make node statistics containers:
nodeBc={}
nodeBcRank={}
nodeDegree={}
nodeDegreeRank={}
for node in distancematrix:
nodeBc[node]=[]
nodeBcRank[node]=[]
nodeDegree[node]=[]
nodeDegreeRank[node]=[]
#rounds
for r in range(rounds):
#first select nodes
selected=[]
for pop in pops:
nPop=len(pops[pop])
bsSize=int(bsP*nPop)
if bsSize<1:
bsSize=1
selected.extend(random.sample(pops[pop],bsSize))
selected.sort()
#make new distance matrix
newmsdata=msdata.getSubset(selected)
newpoplist=[]
for index in selected:
newpoplist.append(poplist[index])
newglists,newunique_poplist=eden.getGoldsteinLists(newpoplist)
newdm=newmsdata.getGroupwiseDistanceMatrix(newglists,distancematrix.distancemeasure,groupNames=newunique_poplist)
newdm.matrixtype=distancematrix.matrixtype
#--BC statistics
thNet=autoThreshold(newdm)
#calculate statistics for this realization
bc=netext.getBetweennessCentrality(thNet)
bcRankList=list(distancematrix)
random.shuffle(bcRankList)
bcRankList.sort(key=lambda x:-bc[x])
for rank,node in enumerate(bcRankList):
nodeBcRank[node].append(rank)
for node in distancematrix:
nodeBc[node].append(bc[node])
#--Degree statistics
othNet=pynet.SymmNet()
for node in newdm:
othNet.addNode(node)
edges=list(newdm.edges)
edges.sort(lambda x,y:cmp(x[2],y[2]),reverse=False)
for i in range(originalThreshold):
othNet[edges[i][0],edges[i][1]]=edges[i][2]
#calculate statistics
degrees=netext.deg(othNet)
for node in othNet:
nodeDegree[node].append(degrees[node])
degreeRankList=list(distancematrix)
random.shuffle(degreeRankList)
degreeRankList.sort(key=lambda x:-degrees[x])
for rank,node in enumerate(degreeRankList):
nodeDegreeRank[node].append(rank)
progressUpdater(float(r)/float(rounds))
return nodeBc,nodeBcRank,nodeDegree,nodeDegreeRank
def bootstrap_single(msdata,poplist,distancematrix,bsP):
pops={}
for i,p in enumerate(poplist):
if p not in pops:
pops[p]=[]
pops[p].append(i)
selected=[]
for pop in pops:
nPop=len(pops[pop])
bsSize=int(bsP*nPop)
if bsSize<1:
bsSize=1
selected.extend(random.sample(pops[pop],bsSize))
selected.sort()
#make new distance matrix
newmsdata=msdata.getSubset(selected)
newpoplist=[]
for index in selected:
newpoplist.append(poplist[index])
newglists,newunique_poplist=eden.getGoldsteinLists(newpoplist)
newdm=newmsdata.getGroupwiseDistanceMatrix(newglists,distancematrix.distancemeasure,groupNames=newunique_poplist)
return newdm,newmsdata,newpoplist
def autoThreshold(net,outputThreshold=False):
"""
Returns a thresholded copy of the net given as a parameter.
The threshold is determined to be the percolation threshold
by finding a maximum value for the susceptibility.
"""
edges=list(net.edges)
edges.sort(lambda x,y:cmp(x[2],y[2]),reverse=False)
ee=percolator.EvaluationList(edges)
ee.setStrengthEvaluations()
suscmax=0
suscmax_thresh=0
nNodes=len(net)
for cs in percolator.Percolator(ee):
s=cs.getSusceptibility(nNodes)
if (s>=suscmax):
suscmax=s
suscmax_thresh=cs.threshold
suscmax_nedges=cs.addedEdges
if cs.getGiantSize()>0.95*len(net):
break
newNet=pynet.SymmNet()
for node in net:
newNet.addNode(node)
#continue one threshold level after the threshold maximizing susceptibility
maxReached=False
stopAtNext=False
for e in edges:
if e[2]==suscmax_thresh:
maxReached=True
if maxReached and e[2]!=suscmax_thresh and not stopAtNext:
stopAtNext=True
lastW=e[2]
if stopAtNext and lastW!=e[2]:
break
newNet[e[0],e[1]]=e[2]
newNet.matrixtype=net.matrixtype
if outputThreshold:
return newNet,lastW
else:
return newNet
| gpl-2.0 |
bnaul/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 82 | 1671 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is
completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y, edgecolor='black', s=20)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/io/wb.py | 5 | 12305 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import map, reduce, range, lrange
from pandas.io.common import urlopen
from pandas.io import json
import pandas
import numpy as np
import warnings
# This list of country codes was pulled from wikipedia during October 2014.
# While some exceptions do exist, it is the best proxy for countries supported
# by World Bank. It is an aggregation of the 2-digit ISO 3166-1 alpha-2 and
# 3-digit ISO 3166-1 alpha-3 codes, with 'all', 'ALL', and 'All' appended to
# the end.
country_codes = ['AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', \
'AS', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE', \
'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', \
'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CC', 'CD', \
'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', \
'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', \
'DO', 'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'FI', \
'FJ', 'FK', 'FM', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', \
'GG', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', \
'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HR', 'HT', 'HU', \
'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT', \
'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', \
'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', \
'LR', 'LS', 'LT', 'LU', 'LV', 'LY', 'MA', 'MC', 'MD', 'ME', \
'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP', 'MQ', \
'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', \
'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', \
'NZ', 'OM', 'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', \
'PN', 'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE', 'RO', 'RS', \
'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI', \
'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'SS', 'ST', 'SV', \
'SX', 'SY', 'SZ', 'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', \
'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV', 'TW', 'TZ', 'UA', \
'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI', \
'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW', \
'ABW', 'AFG', 'AGO', 'AIA', 'ALA', 'ALB', 'AND', 'ARE', \
'ARG', 'ARM', 'ASM', 'ATA', 'ATF', 'ATG', 'AUS', 'AUT', \
'AZE', 'BDI', 'BEL', 'BEN', 'BES', 'BFA', 'BGD', 'BGR', \
'BHR', 'BHS', 'BIH', 'BLM', 'BLR', 'BLZ', 'BMU', 'BOL', \
'BRA', 'BRB', 'BRN', 'BTN', 'BVT', 'BWA', 'CAF', 'CAN', \
'CCK', 'CHE', 'CHL', 'CHN', 'CIV', 'CMR', 'COD', 'COG', \
'COK', 'COL', 'COM', 'CPV', 'CRI', 'CUB', 'CUW', 'CXR', \
'CYM', 'CYP', 'CZE', 'DEU', 'DJI', 'DMA', 'DNK', 'DOM', \
'DZA', 'ECU', 'EGY', 'ERI', 'ESH', 'ESP', 'EST', 'ETH', \
'FIN', 'FJI', 'FLK', 'FRA', 'FRO', 'FSM', 'GAB', 'GBR', \
'GEO', 'GGY', 'GHA', 'GIB', 'GIN', 'GLP', 'GMB', 'GNB', \
'GNQ', 'GRC', 'GRD', 'GRL', 'GTM', 'GUF', 'GUM', 'GUY', \
'HKG', 'HMD', 'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IMN', \
'IND', 'IOT', 'IRL', 'IRN', 'IRQ', 'ISL', 'ISR', 'ITA', \
'JAM', 'JEY', 'JOR', 'JPN', 'KAZ', 'KEN', 'KGZ', 'KHM', \
'KIR', 'KNA', 'KOR', 'KWT', 'LAO', 'LBN', 'LBR', 'LBY', \
'LCA', 'LIE', 'LKA', 'LSO', 'LTU', 'LUX', 'LVA', 'MAC', \
'MAF', 'MAR', 'MCO', 'MDA', 'MDG', 'MDV', 'MEX', 'MHL', \
'MKD', 'MLI', 'MLT', 'MMR', 'MNE', 'MNG', 'MNP', 'MOZ', \
'MRT', 'MSR', 'MTQ', 'MUS', 'MWI', 'MYS', 'MYT', 'NAM', \
'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD', 'NOR', \
'NPL', 'NRU', 'NZL', 'OMN', 'PAK', 'PAN', 'PCN', 'PER', \
'PHL', 'PLW', 'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', \
'PSE', 'PYF', 'QAT', 'REU', 'ROU', 'RUS', 'RWA', 'SAU', \
'SDN', 'SEN', 'SGP', 'SGS', 'SHN', 'SJM', 'SLB', 'SLE', \
'SLV', 'SMR', 'SOM', 'SPM', 'SRB', 'SSD', 'STP', 'SUR', \
'SVK', 'SVN', 'SWE', 'SWZ', 'SXM', 'SYC', 'SYR', 'TCA', \
'TCD', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TLS', 'TON', \
'TTO', 'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', \
'UMI', 'URY', 'USA', 'UZB', 'VAT', 'VCT', 'VEN', 'VGB', \
'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', \
'ZWE', 'all', 'ALL', 'All']
def download(country=['MX', 'CA', 'US'], indicator=['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS'],
start=2003, end=2005,errors='warn'):
"""
Download data series from the World Bank's World Development Indicators
Parameters
----------
indicator: string or list of strings
taken from the ``id`` field in ``WDIsearch()``
country: string or list of strings.
``all`` downloads data for all countries
2 or 3 character ISO country codes select individual
countries (e.g.``US``,``CA``) or (e.g.``USA``,``CAN``). The codes
can be mixed.
The two ISO lists of countries, provided by wikipedia, are hardcoded
into pandas as of 11/10/2014.
start: int
First year of the data series
end: int
Last year of the data series (inclusive)
errors: str {'ignore', 'warn', 'raise'}, default 'warn'
Country codes are validated against a hardcoded list. This controls
        the outcome of that validation, and is also applied to the results
        returned by the World Bank.
        errors='raise' will raise a ValueError on a bad country code.
Returns
-------
``pandas`` DataFrame with columns: country, iso_code, year,
indicator value.
"""
if type(country) == str:
country = [country]
bad_countries = np.setdiff1d(country, country_codes)
# Validate the input
if len(bad_countries) > 0:
tmp = ", ".join(bad_countries)
if errors == 'raise':
raise ValueError("Invalid Country Code(s): %s" % tmp)
if errors == 'warn':
warnings.warn('Non-standard ISO country codes: %s' % tmp)
# Work with a list of indicators
if type(indicator) == str:
indicator = [indicator]
# Download
data = []
bad_indicators = {}
for ind in indicator:
one_indicator_data,msg = _get_data(ind, country, start, end)
if msg == "Success":
data.append(one_indicator_data)
else:
bad_indicators[ind] = msg
if len(bad_indicators.keys()) > 0:
bad_ind_msgs = [i + " : " + m for i,m in bad_indicators.items()]
bad_ind_msgs = "\n\n".join(bad_ind_msgs)
bad_ind_msgs = "\n\nInvalid Indicators:\n\n%s" % bad_ind_msgs
if errors == 'raise':
raise ValueError(bad_ind_msgs)
if errors == 'warn':
warnings.warn(bad_ind_msgs)
# Confirm we actually got some data, and build Dataframe
if len(data) > 0:
out = reduce(lambda x, y: x.merge(y, how='outer'), data)
out = out.drop('iso_code', axis=1)
out = out.set_index(['country', 'year'])
out = out.convert_objects(convert_numeric=True)
return out
else:
msg = "No indicators returned data."
if errors == 'ignore':
msg += " Set errors='warn' for more information."
raise ValueError(msg)
def _get_data(indicator="NY.GNS.ICTR.GN.ZS", country='US',
start=2002, end=2005):
if type(country) == str:
country = [country]
countries = ';'.join(country)
# Build URL for api call
url = ("http://api.worldbank.org/countries/" + countries + "/indicators/" +
indicator + "?date=" + str(start) + ":" + str(end) +
"&per_page=25000&format=json")
# Download
with urlopen(url) as response:
data = response.read()
# Check to see if there is a possible problem
possible_message = json.loads(data)[0]
if 'message' in possible_message.keys():
msg = possible_message['message'][0]
try:
msg = msg['key'].split() + ["\n "] + msg['value'].split()
wb_err = ' '.join(msg)
except:
wb_err = ""
            if 'key' in msg.keys():
                wb_err = msg['key'] + "\n "
            if 'value' in msg.keys():
                wb_err += msg['value']
error_msg = "Problem with a World Bank Query \n %s"
return None, error_msg % wb_err
if 'total' in possible_message.keys():
if possible_message['total'] == 0:
return None, "No results from world bank."
# Parse JSON file
data = json.loads(data)[1]
country = [x['country']['value'] for x in data]
iso_code = [x['country']['id'] for x in data]
year = [x['date'] for x in data]
value = [x['value'] for x in data]
# Prepare output
out = pandas.DataFrame([country, iso_code, year, value]).T
out.columns = ['country', 'iso_code', 'year', indicator]
return out,"Success"
def get_countries():
'''Query information about countries
'''
url = 'http://api.worldbank.org/countries/?per_page=1000&format=json'
with urlopen(url) as response:
data = response.read()
data = json.loads(data)[1]
data = pandas.DataFrame(data)
data.adminregion = [x['value'] for x in data.adminregion]
data.incomeLevel = [x['value'] for x in data.incomeLevel]
data.lendingType = [x['value'] for x in data.lendingType]
data.region = [x['value'] for x in data.region]
data = data.rename(columns={'id': 'iso3c', 'iso2Code': 'iso2c'})
return data
def get_indicators():
'''Download information about all World Bank data series
'''
url = 'http://api.worldbank.org/indicators?per_page=50000&format=json'
with urlopen(url) as response:
data = response.read()
data = json.loads(data)[1]
data = pandas.DataFrame(data)
# Clean fields
data.source = [x['value'] for x in data.source]
fun = lambda x: x.encode('ascii', 'ignore')
data.sourceOrganization = data.sourceOrganization.apply(fun)
# Clean topic field
def get_value(x):
try:
return x['value']
except:
return ''
fun = lambda x: [get_value(y) for y in x]
data.topics = data.topics.apply(fun)
data.topics = data.topics.apply(lambda x: ' ; '.join(x))
    # Clean output
data = data.sort(columns='id')
data.index = pandas.Index(lrange(data.shape[0]))
return data
_cached_series = None
def search(string='gdp.*capi', field='name', case=False):
"""
Search available data series from the world bank
Parameters
----------
string: string
regular expression
field: string
id, name, source, sourceNote, sourceOrganization, topics
See notes below
case: bool
case sensitive search?
Notes
-----
The first time this function is run it will download and cache the full
list of available series. Depending on the speed of your network
connection, this can take time. Subsequent searches will use the cached
copy, so they should be much faster.
id : Data series indicator (for use with the ``indicator`` argument of
``WDI()``) e.g. NY.GNS.ICTR.GN.ZS"
name: Short description of the data series
source: Data collection project
sourceOrganization: Data collection organization
note:
sourceNote:
topics:
"""
# Create cached list of series if it does not exist
global _cached_series
if type(_cached_series) is not pandas.core.frame.DataFrame:
_cached_series = get_indicators()
data = _cached_series[field]
idx = data.str.contains(string, case=case)
out = _cached_series.ix[idx].dropna()
return out
| mit |
bikong2/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_macosx.py | 2 | 7342 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase)
from matplotlib.figure import Figure
from matplotlib import rcParams
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
from .backend_agg import RendererAgg, FigureCanvasAgg
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class TimerMac(_macosx.Timer, TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
run loops for timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
# completely implemented at the C-level (in _macosx.Timer)
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
_macosx.FigureCanvas.__init__(self, width, height)
self._device_scale = 1.0
def _set_device_scale(self, value):
if self._device_scale != value:
self.figure.dpi = self.figure.dpi / self._device_scale * value
self._device_scale = value
def get_renderer(self, cleared=False):
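        # The Agg renderer is cached and only rebuilt when the figure size or dpi
        # changes; passing cleared=True reuses the cached renderer after wiping it.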
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try:
self._lastKey, self._renderer
except AttributeError:
need_new_renderer = True
else:
need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self._renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self._renderer.clear()
return self._renderer
def _draw(self):
renderer = self.get_renderer()
if not self.figure.stale:
return renderer
self.figure.draw(renderer)
return renderer
def draw(self):
self.invalidate()
def draw_idle(self, *args, **kwargs):
self.invalidate()
def blit(self, bbox):
self.invalidate()
def resize(self, width, height):
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width * self._device_scale,
height * self._device_scale,
forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerMac(*args, **kwargs)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
if matplotlib.is_interactive():
self.show()
self.canvas.draw_idle()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self, *args):
filename = _macosx.choose_save_file('Save the figure',
self.canvas.get_default_filename())
if filename is None: # Cancel
return
self.canvas.figure.savefig(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
@_Backend.export
class _BackendMac(_Backend):
FigureCanvas = FigureCanvasMac
FigureManager = FigureManagerMac
def trigger_manager_draw(manager):
# For performance reasons, we don't want to redraw the figure after
# each draw command. Instead, we mark the figure as invalid, so that it
# will be redrawn as soon as the event loop resumes via PyOS_InputHook.
# This function should be called after each draw event, even if
# matplotlib is not running interactively.
manager.canvas.invalidate()
@staticmethod
def mainloop():
_macosx.show()
| mit |
ltiao/scikit-learn | sklearn/utils/estimator_checks.py | 3 | 55953 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def check_supervised_y_no_nan(name, Estimator):
    # Checks that the Estimator rejects non-finite (NaN or inf) targets.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message" \
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
        # Due to the Johnson-Lindenstrauss lemma and the often very small
        # number of samples, the number of components of the random matrix
        # projection will probably be greater than the number of features.
        # So we impose a smaller number (and avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10,
        # which is more features than we have in most cases.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
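# Example (illustrative sketch): NotAnArray wraps an ndarray without being one
# itself, so estimators are forced through their array-conversion code path;
# np.asarray recovers the underlying data via __array__.
#
#   >>> wrapped = NotAnArray(np.arange(4))
#   >>> isinstance(wrapped, np.ndarray)
#   False
#   >>> np.asarray(wrapped)
#   array([0, 1, 2, 3])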
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
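# Illustrative note: struct.calcsize('P') is the size of a C pointer in bytes,
# so a 32-bit interpreter yields 4 * 8 == 32 while a 64-bit build yields 64
# and _is_32bit() returns False.
#
#   >>> import struct
#   >>> struct.calcsize('P') * 8   # 64 on a 64-bit build, 32 on a 32-bit one
#   64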
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with a single-element y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
    # like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    # Checks that the Estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle negative feature values
    X -= X.min()
    # some estimators only accept a multi-output (2-D) y
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one scheme of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
    # like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
        # the predictions should cover all classes seen during training
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # check that calling decision_function, predict_proba or
    # predict_log_proba on a regressor raises a DeprecationWarning
    # (when such methods exist at all)
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely
            # too small to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
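# Example (illustrative sketch): only estimator names containing "MultiTask"
# get a reshaped, column-vector y; everything else is passed through.
#
#   >>> multioutput_estimator_convert_y_2d("Ridge", np.array([1, 2, 3]))
#   array([1, 2, 3])
#   >>> multioutput_estimator_convert_y_2d("MultiTaskLasso", np.array([1, 2, 3]))
#   array([[1],
#          [2],
#          [3]])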
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = np.reshape(y_, (-1, 1))
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
    # These return an n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
| bsd-3-clause |
great-expectations/great_expectations | great_expectations/core/expectation_configuration.py | 1 | 49533 | import json
import logging
from copy import deepcopy
from typing import Any, Dict
import jsonpatch
from great_expectations.core.evaluation_parameters import (
_deduplicate_evaluation_parameter_dependencies,
build_evaluation_parameters,
find_evaluation_parameter_dependencies,
)
from great_expectations.core.urn import ge_urn
from great_expectations.core.util import (
convert_to_json_serializable,
ensure_json_serializable,
nested_update,
)
from great_expectations.exceptions import (
InvalidExpectationConfigurationError,
InvalidExpectationKwargsError,
ParserError,
)
from great_expectations.expectations.registry import get_expectation_impl
from great_expectations.marshmallow__shade import (
Schema,
ValidationError,
fields,
post_load,
)
from great_expectations.types import SerializableDictDot
logger = logging.getLogger(__name__)
def parse_result_format(result_format):
"""This is a simple helper utility that can be used to parse a string result_format into the dict format used
internally by great_expectations. It is not necessary but allows shorthand for result_format in cases where
there is no need to specify a custom partial_unexpected_count."""
if isinstance(result_format, str):
result_format = {"result_format": result_format, "partial_unexpected_count": 20}
else:
if "partial_unexpected_count" not in result_format:
result_format["partial_unexpected_count"] = 20
return result_format
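# Example (illustrative sketch): a bare string is expanded into the dict form
# with the default partial_unexpected_count, while an explicit value is kept.
#
#   >>> parse_result_format("SUMMARY")
#   {'result_format': 'SUMMARY', 'partial_unexpected_count': 20}
#   >>> parse_result_format({"result_format": "COMPLETE",
#   ...                      "partial_unexpected_count": 5})
#   {'result_format': 'COMPLETE', 'partial_unexpected_count': 5}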
class ExpectationConfiguration(SerializableDictDot):
"""ExpectationConfiguration defines the parameters and name of a specific expectation."""
kwarg_lookup_dict = {
"expect_column_to_exist": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["column_index"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"column_index": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_columns_to_match_ordered_list": {
"domain_kwargs": [],
"success_kwargs": ["column_list"],
"default_kwarg_values": {
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_column_count_to_be_between": {
"domain_kwargs": [],
"success_kwargs": ["min_value", "max_value"],
"default_kwarg_values": {
"min_value": None,
"max_value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_column_count_to_equal": {
"domain_kwargs": [],
"success_kwargs": ["value"],
"default_kwarg_values": {
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_row_count_to_be_between": {
"domain_kwargs": [],
"success_kwargs": ["min_value", "max_value"],
"default_kwarg_values": {
"min_value": None,
"max_value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_table_row_count_to_equal": {
"domain_kwargs": [],
"success_kwargs": ["value"],
"default_kwarg_values": {
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_unique": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_be_null": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_compound_columns_to_be_unique": {
"domain_kwargs": ["column_list", "row_condition", "condition_parser"],
"success_kwargs": ["ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "all_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_null": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_of_type": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_in_type_list": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "mostly", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "mostly", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"min_value",
"max_value",
"strict_min",
"strict_max",
"allow_cross_type_comparisons",
"parse_strings_as_datetimes",
"output_strftime_format",
"mostly",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"allow_cross_type_comparisons": None,
"parse_strings_as_datetimes": None,
"output_strftime_format": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_increasing": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["strictly", "parse_strings_as_datetimes", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"strictly": None,
"parse_strings_as_datetimes": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_decreasing": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["strictly", "parse_strings_as_datetimes", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"strictly": None,
"parse_strings_as_datetimes": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_value_lengths_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_value_lengths_to_equal": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_regex": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_match_regex": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_regex_list": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex_list", "match_on", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"match_on": "any",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_not_match_regex_list": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["regex_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_strftime_format": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["strftime_format", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_dateutil_parseable": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_be_json_parseable": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_values_to_match_json_schema": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["json_schema", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["distribution", "p_value", "params"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"p_value": 0.05,
"params": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_distinct_values_to_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_distinct_values_to_equal_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_distinct_values_to_contain_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "parse_strings_as_datetimes"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_mean_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_median_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_quantile_values_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["quantile_ranges", "allow_relative_error"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"allow_relative_error": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_stdev_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_unique_value_count_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_proportion_of_unique_values_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_most_common_value_to_be_in_set": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["value_set", "ties_okay"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ties_okay": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_sum_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["min_value", "max_value", "strict_min", "strict_max"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_min_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"min_value",
"max_value",
"strict_min",
"strict_max",
"parse_strings_as_datetimes",
"output_strftime_format",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"parse_strings_as_datetimes": None,
"output_strftime_format": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_max_to_be_between": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"min_value",
"max_value",
"strict_min",
"strict_max",
"parse_strings_as_datetimes",
"output_strftime_format",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"min_value": None,
"max_value": None,
"strict_min": False,
"strict_max": False,
"parse_strings_as_datetimes": None,
"output_strftime_format": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_chisquare_test_p_value_to_be_greater_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["partition_object", "p", "tail_weight_holdout"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"partition_object": None,
"p": 0.05,
"tail_weight_holdout": 0,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_bootstrapped_ks_test_p_value_to_be_greater_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"partition_object",
"p",
"bootstrap_samples",
"bootstrap_sample_size",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"partition_object": None,
"p": 0.05,
"bootstrap_samples": None,
"bootstrap_sample_size": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_kl_divergence_to_be_less_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": [
"partition_object",
"threshold",
"tail_weight_holdout",
"internal_weight_holdout",
"bucketize_data",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"partition_object": None,
"threshold": None,
"tail_weight_holdout": 0,
"internal_weight_holdout": 0,
"bucketize_data": True,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_pair_values_to_be_equal": {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
"success_kwargs": ["ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "both_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_pair_values_A_to_be_greater_than_B": {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
"success_kwargs": [
"or_equal",
"parse_strings_as_datetimes",
"allow_cross_type_comparisons",
"ignore_row_if",
],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"or_equal": None,
"parse_strings_as_datetimes": None,
"allow_cross_type_comparisons": None,
"ignore_row_if": "both_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_pair_values_to_be_in_set": {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
"success_kwargs": ["value_pairs_set", "ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "both_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_multicolumn_values_to_be_unique": {
"domain_kwargs": ["column_list", "row_condition", "condition_parser"],
"success_kwargs": ["ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "all_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_of_type__aggregate": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_of_type__map": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_in_type_list__aggregate": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_in_type_list__map": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_value_z_scores_to_be_less_than": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["threshold", "mostly", "double_sided"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_file_line_regex_match_count_to_be_between": {
"domain_kwargs": [],
"success_kwargs": [
"regex",
"expected_min_count",
"expected_max_count",
"skip",
],
"default_kwarg_values": {
"expected_min_count": 0,
"expected_max_count": None,
"skip": None,
"mostly": 1,
"nonnull_lines_regex": r"^\s*$",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
"_lines": None,
},
},
"expect_file_line_regex_match_count_to_equal": {
"domain_kwargs": [],
"success_kwargs": ["regex", "expected_count", "skip"],
"default_kwarg_values": {
"expected_count": 0,
"skip": None,
"mostly": 1,
"nonnull_lines_regex": r"^\s*$",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
"_lines": None,
},
},
"expect_file_hash_to_equal": {
"domain_kwargs": [],
"success_kwargs": ["value", "hash_alg"],
"default_kwarg_values": {
"hash_alg": "md5",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
},
},
"expect_file_size_to_be_between": {
"domain_kwargs": [],
"success_kwargs": ["minsize", "maxsize"],
"default_kwarg_values": {
"minsize": 0,
"maxsize": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
},
},
"expect_file_to_exist": {
"domain_kwargs": [],
"success_kwargs": ["filepath"],
"default_kwarg_values": {
"filepath": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
},
},
"expect_file_to_have_valid_table_header": {
"domain_kwargs": [],
"success_kwargs": ["regex", "skip"],
"default_kwarg_values": {
"skip": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
},
},
"expect_file_to_be_valid_json": {
"domain_kwargs": [],
"success_kwargs": ["schema"],
"default_kwarg_values": {
"schema": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
},
},
}
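    # Runtime kwargs may be supplied for any expectation at validation time;
    # they shape how results are reported (verbosity, config echo, exception
    # handling) rather than the success criteria themselves.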
runtime_kwargs = ["result_format", "include_config", "catch_exceptions"]
def __init__(self, expectation_type, kwargs, meta=None, success_on_last_run=None):
if not isinstance(expectation_type, str):
raise InvalidExpectationConfigurationError(
"expectation_type must be a string"
)
self._expectation_type = expectation_type
if not isinstance(kwargs, dict):
raise InvalidExpectationConfigurationError(
"expectation configuration kwargs must be a dict."
)
self._kwargs = kwargs
self._raw_kwargs = None # the kwargs before evaluation parameters are evaluated
if meta is None:
meta = {}
# We require meta information to be serializable, but do not convert until necessary
ensure_json_serializable(meta)
self.meta = meta
self.success_on_last_run = success_on_last_run
def process_evaluation_parameters(
self, evaluation_parameters, interactive_evaluation=True, data_context=None
):
        if self._raw_kwargs is not None:
            logger.debug(
                "evaluation_parameters have already been built on this expectation"
            )
            # keep the original raw kwargs rather than rebuilding over them
            return
(evaluation_args, substituted_parameters,) = build_evaluation_parameters(
self._kwargs,
evaluation_parameters,
interactive_evaluation,
data_context,
)
self._raw_kwargs = self._kwargs
self._kwargs = evaluation_args
if len(substituted_parameters) > 0:
self.meta["substituted_parameters"] = substituted_parameters
def get_raw_configuration(self):
# return configuration without substituted evaluation parameters
raw_config = deepcopy(self)
if raw_config._raw_kwargs is not None:
raw_config._kwargs = raw_config._raw_kwargs
raw_config._raw_kwargs = None
return raw_config
def patch(self, op: str, path: str, value: Any) -> "ExpectationConfiguration":
"""
Args:
op: A jsonpatch operation. One of 'add', 'replace', or 'remove'
path: A jsonpatch path for the patch operation
value: The value to patch
Returns:
The patched ExpectationConfiguration object
"""
if op not in ["add", "replace", "remove"]:
raise ValueError("Op must be either 'add', 'replace', or 'remove'")
try:
valid_path = path.split("/")[1]
except IndexError:
raise IndexError(
"Ensure you have a valid jsonpatch path of the form '/path/foo' "
"(see http://jsonpatch.com/)"
)
if valid_path not in self.get_runtime_kwargs().keys():
raise ValueError("Path not available in kwargs (see http://jsonpatch.com/)")
# TODO: Call validate_kwargs when implemented
patch = jsonpatch.JsonPatch([{"op": op, "path": path, "value": value}])
patch.apply(self.kwargs, in_place=True)
return self
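    # Illustrative use of patch() with hypothetical values (not taken from a
    # real suite):
    #   config = ExpectationConfiguration(
    #       expectation_type="expect_column_values_to_be_between",
    #       kwargs={"column": "price", "min_value": 0, "max_value": 100},
    #   )
    #   config.patch(op="replace", path="/max_value", value=500)
    # The key named after "/" must already be one of the expectation's runtime
    # kwargs, otherwise a ValueError is raised.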
@property
def expectation_type(self):
return self._expectation_type
@property
def kwargs(self):
return self._kwargs
def _get_default_custom_kwargs(self):
# NOTE: this is a holdover until class-first expectations control their
# defaults, and so defaults are inherited.
if self.expectation_type.startswith("expect_column_pair"):
return {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": [],
"default_kwarg_values": {
"column_A": None,
"column_B": None,
"row_condition": None,
"condition_parser": None,
},
}
elif self.expectation_type.startswith("expect_column"):
return {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": [],
"default_kwarg_values": {
"column": None,
"row_condition": None,
"condition_parser": None,
},
}
logger.warning("Requested kwargs for an unrecognized expectation.")
return {
"domain_kwargs": [],
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": [],
"default_kwarg_values": {},
}
def get_domain_kwargs(self):
expectation_kwargs_dict = self.kwarg_lookup_dict.get(
self.expectation_type, None
)
if expectation_kwargs_dict is None:
impl = get_expectation_impl(self.expectation_type)
if impl is not None:
domain_keys = impl.domain_keys
default_kwarg_values = impl.default_kwarg_values
else:
expectation_kwargs_dict = self._get_default_custom_kwargs()
default_kwarg_values = expectation_kwargs_dict.get(
"default_kwarg_values", dict()
)
domain_keys = expectation_kwargs_dict["domain_kwargs"]
else:
default_kwarg_values = expectation_kwargs_dict.get(
"default_kwarg_values", dict()
)
domain_keys = expectation_kwargs_dict["domain_kwargs"]
domain_kwargs = {
key: self.kwargs.get(key, default_kwarg_values.get(key))
for key in domain_keys
}
missing_kwargs = set(domain_keys) - set(domain_kwargs.keys())
if missing_kwargs:
raise InvalidExpectationKwargsError(
f"Missing domain kwargs: {list(missing_kwargs)}"
)
return domain_kwargs
def get_success_kwargs(self):
expectation_kwargs_dict = self.kwarg_lookup_dict.get(
self.expectation_type, None
)
if expectation_kwargs_dict is None:
impl = get_expectation_impl(self.expectation_type)
if impl is not None:
success_keys = impl.success_keys
default_kwarg_values = impl.default_kwarg_values
else:
expectation_kwargs_dict = self._get_default_custom_kwargs()
default_kwarg_values = expectation_kwargs_dict.get(
"default_kwarg_values", dict()
)
success_keys = expectation_kwargs_dict["success_kwargs"]
else:
default_kwarg_values = expectation_kwargs_dict.get(
"default_kwarg_values", dict()
)
success_keys = expectation_kwargs_dict["success_kwargs"]
domain_kwargs = self.get_domain_kwargs()
success_kwargs = {
key: self.kwargs.get(key, default_kwarg_values.get(key))
for key in success_keys
}
success_kwargs.update(domain_kwargs)
return success_kwargs
def get_runtime_kwargs(self, runtime_configuration=None):
expectation_kwargs_dict = self.kwarg_lookup_dict.get(
self.expectation_type, None
)
if expectation_kwargs_dict is None:
impl = get_expectation_impl(self.expectation_type)
if impl is not None:
runtime_keys = impl.runtime_keys
default_kwarg_values = impl.default_kwarg_values
else:
expectation_kwargs_dict = self._get_default_custom_kwargs()
default_kwarg_values = expectation_kwargs_dict.get(
"default_kwarg_values", dict()
)
runtime_keys = self.runtime_kwargs
else:
default_kwarg_values = expectation_kwargs_dict.get(
"default_kwarg_values", dict()
)
runtime_keys = self.runtime_kwargs
success_kwargs = self.get_success_kwargs()
lookup_kwargs = deepcopy(self.kwargs)
if runtime_configuration:
lookup_kwargs.update(runtime_configuration)
runtime_kwargs = {
key: lookup_kwargs.get(key, default_kwarg_values.get(key))
for key in runtime_keys
}
runtime_kwargs["result_format"] = parse_result_format(
runtime_kwargs["result_format"]
)
runtime_kwargs.update(success_kwargs)
return runtime_kwargs
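    # The three accessors above nest: get_domain_kwargs() returns only the keys
    # that locate the data being tested (column, row_condition, ...),
    # get_success_kwargs() adds the keys that decide pass/fail, and
    # get_runtime_kwargs() further resolves result_format, include_config and
    # catch_exceptions, so each result is a superset of the previous one.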
def applies_to_same_domain(self, other_expectation_configuration):
if (
not self.expectation_type
== other_expectation_configuration.expectation_type
):
return False
return (
self.get_domain_kwargs()
== other_expectation_configuration.get_domain_kwargs()
)
def isEquivalentTo(self, other, match_type="success"):
"""ExpectationConfiguration equivalence does not include meta, and relies on *equivalence* of kwargs."""
if not isinstance(other, self.__class__):
if isinstance(other, dict):
try:
other = expectationConfigurationSchema.load(other)
except ValidationError:
logger.debug(
"Unable to evaluate equivalence of ExpectationConfiguration object with dict because "
"dict other could not be instantiated as an ExpectationConfiguration"
)
return NotImplemented
else:
# Delegate comparison to the other instance
return NotImplemented
if match_type == "domain":
return all(
(
self.expectation_type == other.expectation_type,
self.get_domain_kwargs() == other.get_domain_kwargs(),
)
)
elif match_type == "success":
return all(
(
self.expectation_type == other.expectation_type,
self.get_success_kwargs() == other.get_success_kwargs(),
)
)
elif match_type == "runtime":
return all(
(
self.expectation_type == other.expectation_type,
self.kwargs == other.kwargs,
)
)
def __eq__(self, other):
"""ExpectationConfiguration equality does include meta, but ignores instance identity."""
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
this_kwargs: dict = convert_to_json_serializable(self.kwargs)
other_kwargs: dict = convert_to_json_serializable(other.kwargs)
this_meta: dict = convert_to_json_serializable(self.meta)
other_meta: dict = convert_to_json_serializable(other.meta)
return all(
(
self.expectation_type == other.expectation_type,
this_kwargs == other_kwargs,
this_meta == other_meta,
)
)
def __ne__(self, other):
# By using the == operator, the returned NotImplemented is handled correctly.
return not self == other
def __repr__(self):
return json.dumps(self.to_json_dict())
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
def to_json_dict(self):
myself = expectationConfigurationSchema.dump(self)
# NOTE - JPC - 20191031: migrate to expectation-specific schemas that subclass result with properly-typed
# schemas to get serialization all-the-way down via dump
myself["kwargs"] = convert_to_json_serializable(myself["kwargs"])
return myself
def get_evaluation_parameter_dependencies(self):
parsed_dependencies = dict()
for key, value in self.kwargs.items():
if isinstance(value, dict) and "$PARAMETER" in value:
param_string_dependencies = find_evaluation_parameter_dependencies(
value["$PARAMETER"]
)
nested_update(parsed_dependencies, param_string_dependencies)
dependencies = dict()
urns = parsed_dependencies.get("urns", [])
for string_urn in urns:
try:
urn = ge_urn.parseString(string_urn)
except ParserError:
                logger.warning(
                    "Unable to parse great_expectations urn {}".format(string_urn)
                )
continue
if not urn.get("metric_kwargs"):
nested_update(
dependencies,
{urn["expectation_suite_name"]: [urn["metric_name"]]},
)
else:
nested_update(
dependencies,
{
urn["expectation_suite_name"]: [
{
"metric_kwargs_id": {
urn["metric_kwargs"]: [urn["metric_name"]]
}
}
]
},
)
dependencies = _deduplicate_evaluation_parameter_dependencies(dependencies)
return dependencies
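    # A kwarg participates in the dependency scan above when its value is a
    # dict carrying a "$PARAMETER" key; the parameter string may embed a
    # great_expectations urn (parsed by ge_urn) pointing at another suite's
    # metric, optionally qualified by metric_kwargs.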
def _get_expectation_impl(self):
return get_expectation_impl(self.expectation_type)
def validate(
self,
validator,
runtime_configuration=None,
):
expectation_impl = self._get_expectation_impl()
return expectation_impl(self).validate(
validator=validator,
runtime_configuration=runtime_configuration,
)
def metrics_validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine=None,
):
expectation_impl = self._get_expectation_impl()
return expectation_impl(self).metrics_validate(
metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
class ExpectationConfigurationSchema(Schema):
expectation_type = fields.Str(
required=True,
error_messages={
"required": "expectation_type missing in expectation configuration"
},
)
kwargs = fields.Dict()
meta = fields.Dict()
# noinspection PyUnusedLocal
@post_load
def make_expectation_configuration(self, data, **kwargs):
return ExpectationConfiguration(**data)
expectationConfigurationSchema = ExpectationConfigurationSchema()
| apache-2.0 |
cyberphox/MissionPlanner | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
Width of the waveform. Default is 1.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0, 20*np.pi, 500)
    >>> plt.plot(x, sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
Duty cycle.
Returns
-------
y : array_like
The output square wave.
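    Examples
    --------
    A minimal sketch: a 5 Hz square wave with a 30% duty cycle, sampled over
    one second.
    >>> import numpy as np
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> y = square(2 * np.pi * 5 * t, duty=0.3)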
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3,-1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
    bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
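    Examples
    --------
    A minimal sketch: in-phase part, quadrature part and envelope of a pulse
    centered at 5 Hz, on a two-second grid.
    >>> import numpy as np
    >>> t = np.linspace(-1, 1, 2001)
    >>> yi, yq, ye = gausspulse(t, fc=5, retquad=True, retenv=True)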
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
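    Examples
    --------
    A minimal sketch: a linear sweep from 6 Hz down to 1 Hz over 10 seconds.
    >>> import numpy as np
    >>> t = np.linspace(0, 10, 5001)
    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')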
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
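    Examples
    --------
    A minimal sketch: the instantaneous frequency follows the cubic
    ``f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2.0``.
    >>> import numpy as np
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)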
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
| gpl-3.0 |
cjqian/incubator-airflow | tests/contrib/hooks/test_bigquery_hook.py | 10 | 9247 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import mock
from airflow.contrib.hooks import bigquery_hook as hook
from oauth2client.contrib.gce import HttpAccessTokenRefreshError
bq_available = True
try:
hook.BigQueryHook().get_service()
except HttpAccessTokenRefreshError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
# since we passed 'json' in, and it's not valid, make sure it's present in the error string.
self.assertIn("JSON", str(context.exception))
# Helpers for test_cancel_queries: mock_poll_job_complete returns False unless mock_job_cancel has been called with the same job_id.
mock_canceled_jobs = []
def mock_poll_job_complete(job_id):
return job_id in mock_canceled_jobs
def mock_job_cancel(projectId, jobId):
mock_canceled_jobs.append(jobId)
return mock.Mock()
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
    def test_cancel_queries(self, mocked_time, mocked_logging):
project_id = 12345
running_job_id = 3
mock_jobs = mock.Mock()
mock_jobs.cancel = mock.Mock(side_effect=mock_job_cancel)
mock_service = mock.Mock()
mock_service.jobs = mock.Mock(return_value=mock_jobs)
bq_hook = hook.BigQueryBaseCursor(mock_service, project_id)
bq_hook.running_job_id = running_job_id
bq_hook.poll_job_complete = mock.Mock(side_effect=mock_poll_job_complete)
bq_hook.cancel_query()
mock_jobs.cancel.assert_called_with(projectId=project_id, jobId=running_job_id)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_,ms2.cluster_centers_)
assert_array_equal(ms1.labels_,ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| gpl-2.0 |
ocastany/GradissimoCalculator | examples/T3_Waist.py | 1 | 1122 | #!/usr/bin/python3
# encoding: utf-8
from gradissimo import *
from matplotlib import pyplot
set_wavelength(1.31e-6)
##############################################################################
# Demonstration that the diameter at waist does not depend on the material,
# but the position of the waist depends on the material.
# Consider a test profile at the entrance of the material. Remember that
# waist diameter and reduced curvature do not change at a material interface.
P = GaussianProfile(w=30e-6, C=-1/200e-6)
# Create the two materials to compare...
HS_1 = HomogeneousSpace(1.00)
HS_2 = HomogeneousSpace(1.60)
# Build the beams...
beam1 = P.beam(HS_1)
beam2 = P.beam(HS_2)
# Print the result...
print("Beam in two different materials but with the same input profile...")
w0_1 = beam1.waist_profile.w
z0_1 = beam1.waist_position
print("Waist for beam 1: {:.4e} m at distance {:.2e} m".format(w0_1, z0_1))
w0_2 = beam2.waist_profile.w
z0_2 = beam2.waist_position
print("Waist for beam 2: {:.4e} m at distance {:.2e} m".format(w0_2, z0_2))
beam1.plot(z2=2*z0_1)
beam2.plot(z2=2*z0_2)
pyplot.show()
| gpl-3.0 |
huzq/scikit-learn | benchmarks/bench_glmnet.py | 20 | 3872 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
jakevdp/multiband_LS | figures/fig04_regularization_example.py | 1 | 2542 | """
Here we plot an example of how regularization can affect the fit
"""
import sys
import os
sys.path.append(os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
# Use seaborn settings for plot styles
import seaborn; seaborn.set()
from gatspy.datasets import RRLyraeGenerated
from gatspy.periodic import LombScargle
# Choose a Sesar 2010 object to base our fits on
lcid = 1019544
rrlyrae = RRLyraeGenerated(lcid, random_state=0)
# Generate data in a 6-month observing season
Nobs = 60
rng = np.random.RandomState(0)
nights = np.arange(180)
rng.shuffle(nights)
nights = nights[:Nobs]
t = 57000 + nights + 0.05 * rng.randn(Nobs)
dmag = 0.06 + 0.01 * rng.randn(Nobs)
mag = rrlyrae.generated('r', t, err=dmag, corrected=False)
periods = np.linspace(0.2, 1.4, 1000)
phase = (t / rrlyrae.period) % 1
phasefit = np.linspace(0, 1, 1000)
tfit = rrlyrae.period * phasefit
fig = plt.figure(figsize=(10, 4))
gs = plt.GridSpec(2, 2, left=0.07, right=0.95, wspace=0.15, bottom=0.15)
ax = [fig.add_subplot(gs[:, 0]),
fig.add_subplot(gs[0, 1]),
fig.add_subplot(gs[1, 1])]
# Plot the data
ax[0].errorbar(phase, mag, dmag, fmt='o', color='#AAAAAA')
ylim = ax[0].get_ylim()
# Here we construct some regularization.
Nterms = 20
sigma_r_inv = np.vstack([np.arange(Nterms + 1),
np.arange(Nterms + 1)]).T.ravel()[1:] ** 2
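# sigma_r_inv is [0, 1, 1, 4, 4, ..., Nterms**2, Nterms**2]: the leading
# constant coefficient is left unpenalized and each subsequent pair of Fourier
# coefficients gets a penalty weight growing as k**2.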
models = [0.5 * sigma_r_inv ** 2, None]
for i, reg in enumerate(models):
model = LombScargle(Nterms=Nterms, regularization=reg,
regularize_by_trace=False).fit(t, mag, dmag)
if reg is None:
label = "unregularized"
else:
label = "regularized"
lines = ax[0].plot(phasefit, model.predict(tfit, period=rrlyrae.period),
label=label)
ax[1 + i].plot(periods, model.periodogram(periods), lw=1,
c=lines[0].get_color())
ax[1 + i].set_title("{0} Periodogram ({1} terms)".format(label.title(),
Nterms))
ax[1 + i].set_ylabel('power')
ax[1 + i].set_xlim(0.2, 1.4)
ax[1 + i].set_ylim(0, 1)
#ax[1 + i].yaxis.set_major_formatter(plt.NullFormatter())
ax[0].set_xlabel('phase')
ax[0].set_ylabel('magnitude')
ax[0].set_ylim(ylim)
ax[0].invert_yaxis()
ax[0].legend(loc='upper left')
ax[0].set_title('Folded Data (P={0:.3f} days)'.format(rrlyrae.period))
ax[1].xaxis.set_major_formatter(plt.NullFormatter())
ax[2].set_xlabel('period (days)')
plt.savefig('fig04.pdf')
plt.show()
| bsd-2-clause |
jchodera/LiquidBenchmark | src/old/find_static_dielectric.py | 2 | 3451 | import re
from rdkit import Chem
from rdkit.Chem import AllChem
import pandas as pd
import glob
from thermopyl import thermoml_lib, cirpy
data = pd.read_hdf("./data.h5", 'data')
# SEE GOOGLE DOC!!!!!!# SEE GOOGLE DOC!!!!!!# SEE GOOGLE DOC!!!!!!# SEE GOOGLE DOC!!!!!!
bad_filenames = ["./10.1016/j.fluid.2013.12.014.xml"]
data = data[~data.filename.isin(bad_filenames)]
# SEE GOOGLE DOC!!!!!!# SEE GOOGLE DOC!!!!!!# SEE GOOGLE DOC!!!!!!# SEE GOOGLE DOC!!!!!!
experiments = ["Mass density, kg/m3", "Relative permittivity at zero frequency"] # , "Isothermal compressibility, 1/kPa", "Isobaric coefficient of expansion, 1/K"]
ind_list = [data[exp].dropna().index for exp in experiments]
ind = reduce(lambda x,y: x.union(y), ind_list)
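# ind is the union of row indices reporting at least one target experiment;
# only those measurements are kept below.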
X = data.ix[ind]
name_to_formula = pd.read_hdf("./compound_name_to_formula.h5", 'data')
name_to_formula = name_to_formula.dropna()
X["n_components"] = X.components.apply(lambda x: len(x.split("__")))
X = X[X.n_components == 1]
X.dropna(axis=1, how='all', inplace=True)
X["formula"] = X.components.apply(lambda chemical: name_to_formula[chemical])
heavy_atoms = ["N", "C", "O", "S", "Cl", "Br", "F"]
desired_atoms = ["H"] + heavy_atoms
X["n_atoms"] = X.formula.apply(lambda formula_string : thermoml_lib.count_atoms(formula_string))
X["n_heavy_atoms"] = X.formula.apply(lambda formula_string : thermoml_lib.count_atoms_in_set(formula_string, heavy_atoms))
X["n_desired_atoms"] = X.formula.apply(lambda formula_string : thermoml_lib.count_atoms_in_set(formula_string, desired_atoms))
X["n_other_atoms"] = X.n_atoms - X.n_desired_atoms
X = X[X.n_other_atoms == 0]
X = X[X.n_heavy_atoms > 0]
X = X[X.n_heavy_atoms <= 10]
X.dropna(axis=1, how='all', inplace=True)
X["smiles"] = X.components.apply(lambda x: cirpy.resolve(x, "smiles")) # This should be cached via sklearn.
X = X[X.smiles != None]
X = X.ix[X.smiles.dropna().index]
X["cas"] = X.components.apply(lambda x: thermoml_lib.get_first_entry(cirpy.resolve(x, "cas"))) # This should be cached via sklearn.
X = X[X.cas != None]
X = X.ix[X.cas.dropna().index]
# Neither names (components) nor smiles are unique. Use CAS to ensure consistency.
cannonical_smiles_lookup = X.groupby("cas").smiles.first()
cannonical_components_lookup = X.groupby("cas").components.first()
X["smiles"] = X.cas.apply(lambda x: cannonical_smiles_lookup[x])
X["components"] = X.cas.apply(lambda x: cannonical_components_lookup[x])
X = X[X["Temperature, K"] > 270]
X = X[X["Temperature, K"] < 330]
X = X[X["Pressure, kPa"] > 100.]
X = X[X["Pressure, kPa"] < 102.]
X.dropna(axis=1, how='all', inplace=True)
X["Pressure, kPa"] = 101.325 # Assume everything within range is comparable.
X["Temperature, K"] = X["Temperature, K"].apply(lambda x: x.round(1)) # Round at the 0.1 digit.
mu = X.groupby(["components", "smiles", "cas", "Temperature, K", "Pressure, kPa"])[experiments].mean()
sigma_std = X.groupby(["components", "smiles", "cas", "Temperature, K", "Pressure, kPa"])[experiments].std().dropna()
sigma_est = X.groupby(["components", "smiles", "cas", "Temperature, K", "Pressure, kPa"])[[e + "_std" for e in experiments]].mean().dropna()
sigma = pd.concat((sigma_std, sigma_est))
sigma["index"] = sigma.index
sigma.drop_duplicates(cols='index', take_last=True, inplace=True)
del sigma["index"]
for e in experiments:
mu[e + "_std"] = sigma[e + "_std"]
q = mu.reset_index()
q = q.ix[q[experiments].dropna().index]
q.to_csv("./tables/data_dielectric.csv")
| gpl-2.0 |
dubourg/openturns | python/doc/pyplots/UserDefinedCovarianceModel.py | 2 | 1077 | import openturns as ot
from math import exp
from matplotlib import pyplot as plt
from openturns.viewer import View
def C(s, t):
return exp(-4.0 * abs(s - t) / (1 + (s * s + t * t)))
N = 64
a = 4.0
#myMesh = ot.IntervalMesher([N]).build(ot.Interval(-a, a))
myMesh = ot.RegularGrid(-a, 2 * a / N, N + 1)
myCovarianceCollection = ot.CovarianceMatrixCollection()
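# Fill the collection with one 1x1 block C(s, t) per vertex pair with l <= k,
# i.e. the lower triangle of the discretized covariance, row by row.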
for k in range(myMesh.getVerticesNumber()):
t = myMesh.getVertices()[k]
for l in range(k + 1):
s = myMesh.getVertices()[l]
matrix = ot.CovarianceMatrix(1)
matrix[0, 0] = C(s[0], t[0])
myCovarianceCollection.add(matrix)
covarianceModel = ot.UserDefinedCovarianceModel(myMesh, myCovarianceCollection)
def f(x):
return [covarianceModel([x[0]], [x[1]])[0, 0]]
func = ot.PythonFunction(2, 1, f)
func.setDescription(['$s$', '$t$', '$cov$'])
cov_graph = func.draw([-a] * 2, [a] * 2, [512] * 2)
fig = plt.figure(figsize=(10, 4))
plt.suptitle('User defined covariance model')
cov_axis = fig.add_subplot(111)
View(cov_graph, figure=fig, axes=[cov_axis], add_legend=False)
| gpl-3.0 |
chen0031/nupic | nupic/math/roc_utils.py | 49 | 8308 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utility functions to compute ROC (Receiver Operator Characteristic) curves
and AUC (Area Under the Curve).
The ROCCurve() and AreaUnderCurve() functions are based on the roc_curve()
and auc() functions found in metrics.py module of scikit-learn
(http://scikit-learn.org/stable/). Scikit-learn has a BSD license (3 clause).
Following is the original license/credits statement from the top of the
metrics.py file:
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD Style.
"""
import numpy as np
def ROCCurve(y_true, y_score):
"""compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
true binary labels
y_score : array, shape = [n_samples]
target scores, can either be probability estimates of
the positive class, confidence values, or binary decisions.
Returns
-------
fpr : array, shape = [>2]
False Positive Rates
tpr : array, shape = [>2]
True Positive Rates
thresholds : array, shape = [>2]
Thresholds on y_score used to compute fpr and tpr
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
References
----------
http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
y_true = np.ravel(y_true)
classes = np.unique(y_true)
# ROC only for binary classification
if classes.shape[0] != 2:
raise ValueError("ROC is defined for binary classification only")
y_score = np.ravel(y_score)
n_pos = float(np.sum(y_true == classes[1])) # nb of true positive
n_neg = float(np.sum(y_true == classes[0])) # nb of true negative
thresholds = np.unique(y_score)
neg_value, pos_value = classes[0], classes[1]
tpr = np.empty(thresholds.size, dtype=np.float) # True positive rate
fpr = np.empty(thresholds.size, dtype=np.float) # False positive rate
# Build tpr/fpr vector
current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0
signal = np.c_[y_score, y_true]
sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
last_score = sorted_signal[0][0]
for score, value in sorted_signal:
if score == last_score:
if value == pos_value:
current_pos_count += 1
else:
current_neg_count += 1
else:
tpr[idx] = (sum_pos + current_pos_count) / n_pos
fpr[idx] = (sum_neg + current_neg_count) / n_neg
sum_pos += current_pos_count
sum_neg += current_neg_count
current_pos_count = 1 if value == pos_value else 0
current_neg_count = 1 if value == neg_value else 0
idx += 1
last_score = score
else:
tpr[-1] = (sum_pos + current_pos_count) / n_pos
fpr[-1] = (sum_neg + current_neg_count) / n_neg
# hard decisions, add (0,0)
if fpr.shape[0] == 2:
fpr = np.array([0.0, fpr[0], fpr[1]])
tpr = np.array([0.0, tpr[0], tpr[1]])
# trivial decisions, add (0,0) and (1,1)
elif fpr.shape[0] == 1:
fpr = np.array([0.0, fpr[0], 1.0])
tpr = np.array([0.0, tpr[0], 1.0])
return fpr, tpr, thresholds
def AreaUnderCurve(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
#x, y = check_arrays(x, y)
if x.shape[0] != y.shape[0]:
raise ValueError('x and y should have the same shape'
' to compute area under curve,'
' but x.shape = %s and y.shape = %s.'
% (x.shape, y.shape))
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
def _printNPArray(x, precision=2):
format = "%%.%df" % (precision)
for elem in x:
print format % (elem),
print
def _test():
"""
This is a toy example, to show the basic functionality:
The dataset is:
actual prediction
-------------------------
0 0.1
0 0.4
1 0.5
1 0.3
1 0.45
Some ROC terminology:
A True Positive (TP) is when we predict TRUE and the actual value is 1.
A False Positive (FP) is when we predict TRUE, but the actual value is 0.
The True Positive Rate (TPR) is TP/P, where P is the total number of actual
positives (3 in this example, the last 3 samples).
The False Positive Rate (FPR) is FP/N, where N is the total number of actual
negatives (2 in this example, the first 2 samples)
Here are the classifications at various choices for the threshold. The
prediction is TRUE if the predicted value is >= threshold and FALSE otherwise.
actual pred 0.50 0.45 0.40 0.30 0.10
---------------------------------------------------------
0 0.1 0 0 0 0 1
0 0.4 0 0 1 1 1
1 0.5 1 1 1 1 1
1 0.3 0 0 0 1 1
1 0.45 0 1 1 1 1
TruePos(TP) 1 2 2 3 3
FalsePos(FP) 0 0 1 1 2
TruePosRate(TPR) 1/3 2/3 2/3 3/3 3/3
FalsePosRate(FPR) 0/2 0/2 1/2 1/2 2/2
  The ROC curve is a plot of FPR on the x-axis and TPR on the y-axis. One can
  pick any operating point along this curve; the operating point is determined
  by the threshold you choose. By changing the threshold, you trade off TPs
  for FPs.
The more area under this curve, the better the classification algorithm is.
The AreaUnderCurve() function can be used to compute the area under this
curve.
"""
yTrue = np.array([0, 0, 1, 1, 1])
yScore = np.array([0.1, 0.4, 0.5, 0.3, 0.45])
(fpr, tpr, thresholds) = ROCCurve(yTrue, yScore)
print "Actual: ",
_printNPArray(yTrue)
print "Predicted: ",
_printNPArray(yScore)
print
print "Thresholds:",
_printNPArray(thresholds[::-1])
print "FPR(x): ",
_printNPArray(fpr)
print "TPR(y): ",
_printNPArray(tpr)
print
area = AreaUnderCurve(fpr, tpr)
print "AUC: ", area
if __name__=='__main__':
_test()
| agpl-3.0 |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
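    # the non-empty fromlist makes __import__ return the leaf submodule
    # (matplotlib.backends.backend_xxx) rather than the top-level package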
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
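# Typical call pattern (sketch): pylab/pyplot run pylab_setup() once at import
# time and re-bind the returned callables, e.g.
#     new_figure_manager, draw_if_interactive, show = pylab_setup()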
| agpl-3.0 |
nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015 | Code/Machine_Learning_Algos/10k_Tests/ml_classification_qda.py | 1 | 2709 | __author__ = 'Arnav'
# !/usr/bin/env python
'''
Using : Python 2.7+ (a separate environment is required for Python 3.x compatibility)
Required files : hdf5_getters.py
Required packages : numpy, pandas, sklearn
# Uses QDA for classification
'''
import pandas
import numpy as np
# main function
if __name__ == '__main__':
col_input = ['genre', 'AvgBarDuration','Loudness', 'Tempo','ArtistFamiliarity','ArtistHotttnesss','SongHotttnesss',
'Mode[0]','Mode[1]','Year',
'Key[0]','Key[1]','Key[2]','Key[3]','Key[4]','Key[5]',
'Key[6]','Key[7]','Key[8]','Key[9]','Key[10]','Key[11]',
'PicthesMean[0]','PicthesMean[1]','PicthesMean[2]','PicthesMean[3]','PicthesMean[4]','PicthesMean[5]',
'PicthesMean[6]','PicthesMean[7]','PicthesMean[8]','PicthesMean[9]','PicthesMean[10]','PicthesMean[11]',
'PitchesVar[0]','PitchesVar[1]','PitchesVar[2]','PitchesVar[3]','PitchesVar[4]','PitchesVar[5]',
'PitchesVar[6]','PitchesVar[7]','PitchesVar[8]','PitchesVar[9]','PitchesVar[10]','PitchesVar[11]',
'TimbreMean[0]','TimbreMean[1]','TimbreMean[2]','TimbreMean[3]','TimbreMean[4]','TimbreMean[5]',
'TimbreMean[6]','TimbreMean[7]','TimbreMean[8]','TimbreMean[9]','TimbreMean[10]','TimbreMean[11]',
'TimbreVar[0]','TimbreVar[1]','TimbreVar[2]','TimbreVar[3]','TimbreVar[4]','TimbreVar[5]',
'TimbreVar[6]','TimbreVar[7]','TimbreVar[8]','TimbreVar[9]','TimbreVar[10]','TimbreVar[11]']
df_input = pandas.read_csv('pandas_merged_output_cleaned_None.csv',
header=None, delimiter="|", names=col_input)
df_input = df_input.dropna()
#df_input = df_input[df_input['Year'] != 0][df_input['genre'] != 'CLASSICAL']
#df_input = df_input[df_input['Year'] != 0][df_input['Year'] < 1992][df_input['genre'] != 'CLASSICAL']
df_input = df_input[df_input['Year'] != 0][df_input['Year'] >= 1992][df_input['genre'] != 'CLASSICAL']
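    # column 0 holds the genre label (target); columns 1-69 hold the audio features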
df_input_target = df_input[list(range(0, 1))].as_matrix()
df_input_data = df_input[list(range(1, 70))].as_matrix()
# splitting the data into training and testing sets
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_input_data, df_input_target.tolist())
# Start QDA Classification
from sklearn.qda import QDA
clf = QDA(priors=None, reg_param=0.001).fit(X_train, np.ravel(y_train[:]))
predicted = clf.predict(X_test)
matches = (predicted == [item for sublist in y_test for item in sublist])
print "Accuracy : ", (matches.sum() / float(len(matches)))
| mit |
tridesclous/tridesclous | tridesclous/peeler_engine_geometry.py | 1 | 28001 | """
Here is an implementation that takes the probe geometry into account
to speed up template matching.
"""
import time
import numpy as np
import joblib
from concurrent.futures import ThreadPoolExecutor
import itertools
from .peeler_engine_base import PeelerEngineGeneric
from .peeler_tools import *
from .peeler_tools import _dtype_spike
import sklearn.metrics.pairwise
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
from .peakdetector import get_peak_detector_class
try:
import numba
HAVE_NUMBA = True
from .numba_tools import numba_explore_best_shift, numba_sparse_scalar_product
except ImportError:
HAVE_NUMBA = False
class PeelerEngineGeometrical(PeelerEngineGeneric):
def change_params(self, **kargs):
PeelerEngineGeneric.change_params(self, **kargs)
def initialize(self, **kargs):
PeelerEngineGeneric.initialize(self, **kargs)
# create peak detector
p = dict(self.catalogue['peak_detector_params'])
self.peakdetector_engine = p.pop('engine')
self.peakdetector_method = p.pop('method')
PeakDetector_class = get_peak_detector_class(self.peakdetector_method, self.peakdetector_engine)
chunksize = self.fifo_size-2*self.n_span # not the real chunksize here
self.peakdetector = PeakDetector_class(self.sample_rate, self.nb_channel,
chunksize, self.internal_dtype, self.geometry)
self.peakdetector.change_params(**p)
# some attrs
self.shifts = np.arange(-self.maximum_jitter_shift, self.maximum_jitter_shift+1)
self.nb_shift = self.shifts.size
#~ self.channel_distances = sklearn.metrics.pairwise.euclidean_distances(self.geometry).astype('float32')
#~ self.channels_adjacency = {}
#~ for c in range(self.nb_channel):
#~ if self.use_sparse_template:
#~ nearest, = np.nonzero(self.channel_distances[c, :]<self.adjacency_radius_um)
#~ self.channels_adjacency[c] = nearest
#~ else:
#~ self.channels_adjacency[c] = np.arange(self.nb_channel, dtype='int64')
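        # one flag per (sample, channel): peaks that were already tried and rejected
        # are skipped by the next re_detect_local_peak() pass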
self.mask_already_tested = np.zeros((self.fifo_size, self.nb_channel), dtype='bool')
def initialize_before_each_segment(self, **kargs):
PeelerEngineGeneric.initialize_before_each_segment(self, **kargs)
self.peakdetector.initialize_stream()
def detect_local_peaks_before_peeling_loop(self):
# reset tested mask
self.mask_already_tested[:] = False
# and detect peak
self.re_detect_local_peak()
#~ print('detect_local_peaks_before_peeling_loop', self.pending_peaks.size)
def re_detect_local_peak(self):
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
if mask.ndim ==1:
#~ mask &= ~self.mask_already_tested[self.n_span:-self.n_span, 0]
sample_indexes, = np.nonzero(mask)
sample_indexes += self.n_span
tested = self.mask_already_tested[sample_indexes, 0]
sample_indexes = sample_indexes[~tested]
chan_indexes = np.zeros(sample_indexes.size, dtype='int64')
else:
#~ mask &= ~self.mask_already_tested[self.n_span:-self.n_span, :]
sample_indexes, chan_indexes = np.nonzero(mask)
sample_indexes += self.n_span
tested = self.mask_already_tested[sample_indexes, chan_indexes]
sample_indexes = sample_indexes[~tested]
chan_indexes = chan_indexes[~tested]
amplitudes = np.abs(self.fifo_residuals[sample_indexes, chan_indexes])
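        # order pending peaks by decreasing absolute amplitude so the strongest
        # peak is explored first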
order = np.argsort(amplitudes)[::-1]
dtype_peak = [('sample_index', 'int32'), ('chan_index', 'int32'), ('peak_value', 'float32')]
self.pending_peaks = np.zeros(sample_indexes.size, dtype=dtype_peak)
self.pending_peaks['sample_index'] = sample_indexes
self.pending_peaks['chan_index'] = chan_indexes
self.pending_peaks['peak_value'] = amplitudes
self.pending_peaks = self.pending_peaks[order]
#~ print('re_detect_local_peak', self.pending_peaks.size)
def select_next_peak(self):
#~ print(len(self.pending_peaks))
if len(self.pending_peaks)>0:
sample_ind, chan_ind, ampl = self.pending_peaks[0]
self.pending_peaks = self.pending_peaks[1:]
return sample_ind, chan_ind
else:
return LABEL_NO_MORE_PEAK, None
def on_accepted_spike(self, sample_ind, cluster_idx, jitter):
# remove spike prediction from fifo residuals
#~ t1 = time.perf_counter()
pos, pred = make_prediction_one_spike(sample_ind, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue)
#~ t2 = time.perf_counter()
#~ print(' make_prediction_one_spike', (t2-t1)*1000)
#~ t1 = time.perf_counter()
self.fifo_residuals[pos:pos+self.peak_width_long, :] -= pred
#~ t2 = time.perf_counter()
#~ print(' self.fifo_residuals -', (t2-t1)*1000)
        # this prevents searching for peaks in that zone until the next "reset_to_not_tested"
#~ t1 = time.perf_counter()
self.clean_pending_peaks_zone(sample_ind, cluster_idx)
#~ t2 = time.perf_counter()
#~ print(' self.clean_pending_peaks_zone -', (t2-t1)*1000)
def clean_pending_peaks_zone(self, sample_ind, cluster_idx):
# TODO test with sparse_mask_level3s!!!!!
mask = self.sparse_mask_level1[cluster_idx, :]
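        # drop pending peaks that overlap the accepted spike in time and sit on
        # channels covered by its template; anything still present there is
        # re-detected after reset_to_not_tested()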
#~ t1 = time.perf_counter()
#~ keep = np.zeros(self.pending_peaks.size, dtype='bool')
#~ for i, peak in enumerate(self.pending_peaks):
#~ in_zone = mask[peak['chan_index']] and \
#~ (peak['sample_index']+self.n_left)<sample_ind and \
#~ sample_ind<(peak['sample_index']+self.n_right)
#~ keep[i] = not(in_zone)
peaks = self.pending_peaks
in_zone = mask[peaks['chan_index']] &\
((peaks['sample_index']+self.n_left)<sample_ind) & \
((peaks['sample_index']+self.n_right)>sample_ind)
keep = ~ in_zone
#~ t2 = time.perf_counter()
#~ print(' clean_pending_peaks_zone loop', (t2-t1)*1000)
self.pending_peaks = self.pending_peaks[keep]
#~ print('clean_pending_peaks_zone', self.pending_peaks.size)
def set_already_tested(self, sample_ind, peak_chan):
self.mask_already_tested[sample_ind, peak_chan] = True
def reset_to_not_tested(self, good_spikes):
for spike in good_spikes:
            # each accepted spike clears the already-tested flags over its time span
            # and template channels so those peaks can be examined again
cluster_idx = self.catalogue['label_to_index'][spike.cluster_label]
chan_mask = self.sparse_mask_level1[cluster_idx, :]
self.mask_already_tested[spike.index + self.n_left_long:spike.index + self.n_right_long][:, chan_mask] = False
self.re_detect_local_peak()
def get_no_label_peaks(self):
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
nolabel_indexes, chan_indexes = np.nonzero(mask)
#~ nolabel_indexes, chan_indexes = np.nonzero(~self.mask_not_already_tested)
nolabel_indexes += self.n_span
nolabel_indexes = nolabel_indexes[nolabel_indexes<(self.chunksize+self.n_span)]
bad_spikes = np.zeros(nolabel_indexes.shape[0], dtype=_dtype_spike)
bad_spikes['index'] = nolabel_indexes
bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED
return bad_spikes
def get_best_template(self, left_ind, chan_ind):
full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
centers0 = self.catalogue['centers0']
projections = self.catalogue['projections']
strict_low = self.catalogue['boundaries'][:, 0]
strict_high = self.catalogue['boundaries'][:, 1]
flexible_low = self.catalogue['boundaries'][:, 2]
flexible_high = self.catalogue['boundaries'][:, 3]
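        # decision statistic per template i: the scalar product
        # sp_i = sum((waveform - centers0[i]) * projections[i]); a template is a
        # candidate when sp_i falls inside its strict boundaries, or failing that
        # its flexible ones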
n = centers0.shape[0]
flat_waveform = full_waveform.flatten()
flat_centers0 = centers0.reshape(n, -1)
#~ scalar_products = np.zeros(n, dtype='float32')
#~ for i in range(n):
#~ sp = np.sum((flat_waveform - flat_centers0[i, :]) * projections[i, :])
#~ scalar_products[i] = sp
#~ scalar_products = np.sum((flat_waveform[np.newaxis, :] - flat_centers0[:, :]) * projections[:, :], axis=1)
#~ print(scalar_products)
#~ t1 = time.perf_counter()
scalar_products = numba_sparse_scalar_product(self.fifo_residuals, left_ind, centers0, projections, chan_ind,
self.sparse_mask_level1, )
#~ t2 = time.perf_counter()
#~ print('numba_sparse_scalar_product', (t2-t1)*1000)
#~ print(scalar_products)
possible_idx, = np.nonzero((scalar_products < strict_high) & (scalar_products > strict_low))
#~ possible_idx, = np.nonzero((scalar_products < flexible_high) & (scalar_products > flexible_low))
#~ print('possible_idx', possible_idx)
#~ print('scalar_products[possible_idx]', scalar_products[possible_idx])
#~ do_plot = False
if len(possible_idx) == 1:
extra_idx = None
candidates_idx =possible_idx
elif len(possible_idx) == 0:
#~ extra_idx, = np.nonzero((np.abs(scalar_products) < 0.5))
extra_idx, = np.nonzero((scalar_products < flexible_high) & (scalar_products > flexible_low))
#~ if len(extra_idx) ==0:
# give a try to very far ones.
#~ extra_idx, = np.nonzero((np.abs(scalar_products) < 1.))
#~ print('extra_idx', extra_idx)
#~ if len(extra_idx) ==0:
#~ candidates_idx = []
#~ else:
#~ candidates_idx = extra_idx
candidates_idx = extra_idx
#~ candidates_idx =possible_idx
#~ pass
elif len(possible_idx) > 1 :
extra_idx = None
candidates_idx = possible_idx
debug_plot_change = False
if len(candidates_idx) > 0:
#~ t1 = time.perf_counter()
candidates_idx = np.array(candidates_idx, dtype='int64')
common_mask = np.sum(self.sparse_mask_level3[candidates_idx, :], axis=0) > 0
shift_scalar_product, shift_distance = numba_explore_best_shift(self.fifo_residuals, left_ind, self.catalogue['centers0'],
self.catalogue['projections'], candidates_idx, self.maximum_jitter_shift, common_mask, self.sparse_mask_level1)
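            # both returned arrays are indexed by (candidate, shift); the
            # (template, shift) pair minimizing the distance over the explored
            # jitter range is kept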
#~ i0, i1 = np.unravel_index(np.argmin(np.abs(shift_scalar_product), axis=None), shift_scalar_product.shape)
i0, i1 = np.unravel_index(np.argmin(shift_distance, axis=None), shift_distance.shape)
#~ best_idx = candidates_idx[i0]
shift = self.shifts[i1]
cluster_idx = candidates_idx[i0]
final_scalar_product = shift_scalar_product[i0, i1]
#~ t2 = time.perf_counter()
#~ print('numba_explore_best_shift', (t2-t1)*1000)
#~ print('shift', shift)
#~ print('cluster_idx', cluster_idx)
#~ print('final_scalar_product', final_scalar_product)
if np.abs(shift) == self.maximum_jitter_shift:
cluster_idx = None
shift = None
final_scalar_product = None
#~ print('maximum_jitter_shift >> cluster_idx = None ')
#~ do_plot = True
#~ i0_bis, i1_bis = np.unravel_index(np.argmin(np.abs(shift_scalar_product), axis=None), shift_scalar_product.shape)
#~ if i0 != i0_bis:
#~ debug_plot_change = True
#~ print('Warning')
#~ print(possible_idx)
#~ print(shift_scalar_product)
#~ print(shift_distance)
#~ if best_idx != cluster_idx:
#~ print('*'*50)
#~ print('best_idx != cluster_idx', best_idx, cluster_idx)
#~ print('*'*50)
#~ cluster_idx = best_idx
#~ debug_plot_change = True
else:
cluster_idx = None
shift = None
final_scalar_product = None
#~ import matplotlib.pyplot as plt
#~ fig, ax = plt.subplots()
#~ ax.plot(self.shifts, shift_scalar_product.T)
#~ plt.show()
#~ print('ici',)
# DEBUG OMP
#~ from sklearn.linear_model import orthogonal_mp_gram
#~ from sklearn.linear_model import OrthogonalMatchingPursuit
#~ n_nonzero_coefs = 2
#~ omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
#~ X = self.catalogue['centers0'].reshape(self.catalogue['centers0'].shape[0], -1).T
#~ waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:].flatten()
#~ y = waveform
#~ omp.fit(X, y)
#~ coef = omp.coef_
#~ idx_r, = coef.nonzero()
#~ cluster_idx_omp = np.argmin(np.abs(coef - 1))
#~ if cluster_idx_omp != cluster_idx and coef[cluster_idx_omp] > 0.5:
#~ if True:
if False:
#~ if cluster_idx in (3,6):
#~ if do_plot:
#~ if False:
#~ if final_scalar_product is not None and np.abs(final_scalar_product) > 0.5:
#~ if True:
#~ if len(possible_idx) != 1:
#~ if len(possible_idx) > 1:
#~ if len(candidates_idx) > 1:
#~ if 7 in possible_idx or cluster_idx == 7:
#~ if cluster_idx not in possible_idx and len(possible_idx) > 0:
#~ if debug_plot_change:
import matplotlib.pyplot as plt
print()
print('best cluster_idx', cluster_idx)
print('possible_idx', possible_idx)
print('extra_idx', extra_idx)
print(scalar_products[possible_idx])
print(strict_high[possible_idx])
print('cluster_idx_omp', cluster_idx_omp)
fig, ax = plt.subplots()
ax.plot(coef)
if cluster_idx is not None:
ax.axvline(cluster_idx)
ax.set_title(f'{cluster_idx} omp {cluster_idx_omp}')
#~ plt.show()
fig, ax = plt.subplots()
shift2 = 0 if shift is None else shift
full_waveform2 = self.fifo_residuals[left_ind+shift2:left_ind+shift2+self.peak_width,:]
ax.plot(full_waveform2.T.flatten(), color='k')
if shift !=0 and shift is not None:
ax.plot(full_waveform.T.flatten(), color='grey', ls='--')
for idx in candidates_idx:
ax.plot(self.catalogue['centers0'][idx, :].T.flatten(), color='m')
ax.plot(self.catalogue['centers0'][cluster_idx_omp, :].T.flatten(), color='y')
if cluster_idx is not None:
ax.plot(self.catalogue['centers0'][cluster_idx, :].T.flatten(), color='c', ls='--')
ax.set_title(f'best {cluster_idx} shift {shift} possible_idx {possible_idx}')
if shift is not None:
fig, ax = plt.subplots()
#~ ax.plot(self.shifts, np.abs(shift_scalar_product).T)
ax.plot(self.shifts, shift_scalar_product.T)
ax.axhline(0)
fig, ax = plt.subplots()
ax.plot(self.shifts, np.abs(shift_distance).T)
plt.show()
best_template_info = {'nb_candidate' : len(candidates_idx), 'final_scalar_product':final_scalar_product}
return cluster_idx, shift, best_template_info
def accept_tempate(self, left_ind, cluster_idx, jitter, best_template_info):
if jitter is None:
# this must have a jitter
jitter = 0
#~ if np.abs(jitter) > (self.maximum_jitter_shift - 0.5):
#~ return False
strict_low = self.catalogue['boundaries'][:, 0]
strict_high = self.catalogue['boundaries'][:, 1]
flexible_low = self.catalogue['boundaries'][:, 2]
flexible_high = self.catalogue['boundaries'][:, 3]
#~ flat_waveform = full_waveform.flatten()
#~ sp2 = np.sum((flat_waveform - centers0[cluster_idx, :].flatten()) * projections[cluster_idx, :])
sp = best_template_info['final_scalar_product']
nb_candidate = best_template_info['nb_candidate']
if nb_candidate == 1:
#~ accept_template = strict_low[cluster_idx] < sp < strict_high[cluster_idx]
accept_template = flexible_low[cluster_idx] < sp < flexible_high[cluster_idx]
else:
accept_template = flexible_low[cluster_idx] < sp < flexible_high[cluster_idx]
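        # in both branches the final scalar product must land inside the template's
        # flexible window; a stricter single-candidate variant is kept commented out above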
# waveform L2 on mask
#~ full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
#~ wf = full_waveform[:, mask]
# prediction with interpolation
#~ _, pred_wf = make_prediction_one_spike(left_ind - self.n_left, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue, long=False)
#~ pred_wf = pred_wf[:, mask]
#~ dist = (pred_wf - wf) ** 2
# criteria per channel
#~ residual_nrj_by_chan = np.sum(dist, axis=0)
#~ wf_nrj = np.sum(wf**2, axis=0)
#~ weight = self.weight_per_template_dict[cluster_idx]
#~ crietria_weighted = (wf_nrj>residual_nrj_by_chan).astype('float') * weight
#~ accept_template = np.sum(crietria_weighted) >= 0.7 * np.sum(weight)
# criteria per sample
#~ dist * np.abs(pred_wf) <
#~ dist_w = dist / np.abs(pred_wf)
#~ gain = (dist < wf**2).astype('float') * np.abs(pred_wf) / np.sum(np.abs(pred_wf))
#~ gain = (wf / pred_wf - 1) * np.abs(pred_wf) / np.sum(np.abs(pred_wf))
#~ gain = (pred_wf**2 / wf**1 - 1) * np.abs(pred_wf) / np.sum(np.abs(pred_wf))
#~ accept_template = np.sum(gain) > 0.8
#~ accept_template = np.sum(gain) > 0.7
#~ accept_template0 = np.sum(gain) > 0.6
#~ accept_template = np.sum(gain) > 0.5
# criteria max residual
#~ max_res = np.max(np.abs(pred_wf - wf))
#~ max_pred = np.max(np.abs(pred_wf))
#~ accept_template1 = max_pred > max_res
#~ accept_template = False
# debug
#~ limit_sp =self.catalogue['sp_normed_limit'][cluster_idx, :]
#~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform * self.catalogue['template_weight'])
#~ print('limit_sp', limit_sp, 'sp', sp)
#~ accept_template = False
#~ immediate_accept = False
# DEBUG always refuse!!!!!
#~ accept_template = False
#~ label = self.catalogue['cluster_labels'][cluster_idx]
# debug
#~ if label == 13:
#~ if accept_template and not immediate_accept:
#~ accept_template = False
# debug
#~ if label == 13:
#~ if not hasattr(self, 'count_accept'):
#~ self.count_accept = {}
#~ self.count_accept[label] = {'accept_template':0, 'immediate_accept':0, 'not_accepted':0}
#~ if accept_template:
#~ self.count_accept[label]['accept_template'] += 1
#~ if immediate_accept:
#~ self.count_accept[label]['immediate_accept'] += 1
#~ else:
#~ self.count_accept[label]['not_accepted'] += 1
#~ print(self.count_accept)
#~ if self._plot_debug:
#~ if not accept_template and label in []:
#~ if not accept_template:
#~ if accept_template:
#~ if True:
if False:
#~ if not immediate_accept:
#~ if immediate_accept:
#~ if immediate_accept:
#~ if label == 7 and not accept_template:
#~ if label == 7:
#~ if label == 121:
#~ if label == 5:
#~ if nb_candidate > 1:
#~ if label == 13 and accept_template and not immediate_accept:
#~ if label == 13 and not accept_template:
#~ if label in (7,9):
#~ nears = np.array([ 5813767, 5813767, 11200038, 11322540, 14989650, 14989673, 14989692, 14989710, 15119220, 15830377, 16138346, 16216666, 17078883])
#~ print(np.abs((left_ind - self.n_left) - nears))
#~ print(np.abs((left_ind - self.n_left) - nears) < 2)
#~ if label == 5 and np.any(np.abs((left_ind - self.n_left) - nears) < 50):
#~ if immediate_accept:
import matplotlib.pyplot as plt
mask = self.sparse_mask_level2[cluster_idx]
full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
wf = full_waveform[:, mask]
_, pred_waveform = make_prediction_one_spike(left_ind - self.n_left, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue, long=False)
pred_wf = pred_waveform[:, mask]
if accept_template:
color = 'g'
else:
color = 'r'
#~ if accept_template:
#~ if immediate_accept:
#~ color = 'g'
#~ else:
#~ color = 'c'
#~ else:
#~ color = 'r'
#~ if not immediate_accept:
#~ fig, ax = plt.subplots()
#~ ax.plot(gain.T.flatten(), color=color)
#~ ax.set_title('{}'.format(np.sum(gain)))
#~ fig, ax = plt.subplots()
#~ ax.plot(feat_centroids.T, alpha=0.5)
#~ ax.plot(feat_waveform, color='k')
fig, ax = plt.subplots()
ax.plot(full_waveform.T.flatten(), color='k')
ax.plot(pred_waveform.T.flatten(), color=color)
l0, l1 = strict_low[cluster_idx], strict_high[cluster_idx]
l2, l3 = flexible_low[cluster_idx], flexible_high[cluster_idx]
title = f'{cluster_idx} {sp:0.3f} lim [{l0:0.3f} {l1:0.3f}] [{l2:0.3f} {l3:0.3f}] {nb_candidate}'
ax.set_title(title)
#~ fig, ax = plt.subplots()
#~ ax.plot(wf.T.flatten(), color='k')
#~ ax.plot(pred_wf.T.flatten(), color=color)
#~ ax.plot( wf.T.flatten() - pred_wf.T.flatten(), color=color, ls='--')
print()
print('cluster_idx',cluster_idx, 'accept_template', accept_template)
#~ print(distance, self.distance_limit[cluster_idx])
#~ print('distance', distance, distance2, 'limit_distance', self.distance_limit[cluster_idx])
#~ limit_sp =self.catalogue['sp_normed_limit'][cluster_idx, :]
#~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform * self.catalogue['template_weight'])
#~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform)
#~ print('limit_sp', limit_sp, 'sp', sp)
#~ if not immediate_accept:
#~ print('np.sum(gain)', np.sum(gain))
#~ fig, ax = plt.subplots()
#~ res = wf - pred_wf
#~ count, bins = np.histogram(res, bins=150, weights=np.abs(pred_wf))
#~ ax.plot(bins[:-1], count)
#~ plt.show()
#~ if distance2 >= self.distance_limit[cluster_idx]:
#~ print(crietria_weighted, weight)
#~ print(np.sum(crietria_weighted), np.sum(weight))
#~ ax.plot(full_wf0.T.flatten(), color='y')
#~ ax.plot( full_wf.T.flatten() - full_wf0.T.flatten(), color='y')
#~ ax.set_title('not accepted')
plt.show()
return accept_template
def _plot_after_inner_peeling_loop(self):
pass
def _plot_before_peeling_loop(self):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plot_sigs = self.fifo_residuals.copy()
self._plot_sigs_before = plot_sigs
#~ chan_order = np.argsort(self.channel_distances[0, :])
for c in range(self.nb_channel):
#~ for c in chan_order:
plot_sigs[:, c] += c*30
ax.plot(plot_sigs, color='k')
ax.axvline(self.fifo_size - self.n_right_long, color='r')
ax.axvline(-self.n_left_long, color='r')
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
sample_inds, chan_inds= np.nonzero(mask)
sample_inds += self.n_span
ax.scatter(sample_inds, plot_sigs[sample_inds, chan_inds], color='r')
ax.set_title(f'nb peak {sample_inds.size}')
#~ plt.show()
def _plot_label_unclassified(self, left_ind, peak_chan, cluster_idx, jitter):
return
import matplotlib.pyplot as plt
#~ print('LABEL UNCLASSIFIED', left_ind, cluster_idx)
fig, ax = plt.subplots()
wf = self.fifo_residuals[left_ind:left_ind+self.peak_width, :]
wf0 = self.catalogue['centers0'][cluster_idx, :, :]
ax.plot(wf.T.flatten(), color='b')
#~ ax.plot(wf0.T.flatten(), color='g')
ax.set_title(f'label_unclassified {left_ind-self.n_left} {cluster_idx} chan{peak_chan}')
ax.axvline(peak_chan*self.peak_width-self.n_left)
plt.show()
def _plot_after_peeling_loop(self, good_spikes):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plot_sigs = self.fifo_residuals.copy()
for c in range(self.nb_channel):
plot_sigs[:, c] += c*30
ax.plot(plot_sigs, color='k')
ax.plot(self._plot_sigs_before, color='b')
ax.axvline(self.fifo_size - self.n_right_long, color='r')
ax.axvline(-self.n_left_long, color='r')
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
sample_inds, chan_inds= np.nonzero(mask)
sample_inds += self.n_span
ax.scatter(sample_inds, plot_sigs[sample_inds, chan_inds], color='r')
good_spikes = np.array(good_spikes, dtype=_dtype_spike)
pred = make_prediction_signals(good_spikes, self.internal_dtype, plot_sigs.shape, self.catalogue, safe=True)
plot_pred = pred.copy()
for c in range(self.nb_channel):
plot_pred[:, c] += c*30
ax.plot(plot_pred, color='m')
plt.show()
| mit |
paris-saclay-cds/ramp-workflow | rampwf/tests/kits/drug_spectra/submissions/starting_kit/regressor.py | 1 | 1353 | from sklearn.ensemble import GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator
import numpy as np
class Regressor(BaseEstimator):
def __init__(self):
self.n_components = 10
self.n_estimators = 40
self.learning_rate = 0.2
self.list_molecule = ['A', 'B', 'Q', 'R']
self.dict_reg = {}
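        # one PCA + gradient boosting pipeline per molecule type; targets are fitted
        # on a log scale and mapped back with exp() in predict()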
for mol in self.list_molecule:
self.dict_reg[mol] = Pipeline([
('pca', PCA(n_components=self.n_components)),
('reg', GradientBoostingRegressor(
n_estimators=self.n_estimators,
learning_rate=self.learning_rate,
random_state=42))
])
def fit(self, X, y):
for i, mol in enumerate(self.list_molecule):
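            # the last four columns of X indicate the molecule type; argmax recovers
            # its index so each row is routed to the matching per-molecule pipeline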
ind_mol = np.where(np.argmax(X[:, -4:], axis=1) == i)[0]
X_mol = X[ind_mol]
y_mol = y[ind_mol]
self.dict_reg[mol].fit(X_mol, np.log(y_mol))
def predict(self, X):
y_pred = np.zeros(X.shape[0])
for i, mol in enumerate(self.list_molecule):
ind_mol = np.where(np.argmax(X[:, -4:], axis=1) == i)[0]
X_mol = X[ind_mol]
y_pred[ind_mol] = np.exp(self.dict_reg[mol].predict(X_mol))
return y_pred
| bsd-3-clause |