repo_name (stringlengths 6-96) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 762-753k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
ahoyosid/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
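# (Added check, not part of the original example; a minimal sketch.) The
# docstring above states that the three classes are roughly balanced because
# they are cut at quantiles of the chi^2 distribution of the squared radius.
# np.bincount on the generated labels makes that easy to verify; numpy is
# imported here only for this check.
import numpy as np
print("samples per class:", np.bincount(y))  # expected: roughly 4333 each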
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
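# (Added check, not in the original example.) estimator_errors_ keeps length
# n_estimators even when boosting stops early, while len(bdt_discrete) counts
# only the trees actually fitted, which is why the arrays are cropped above.
assert len(bdt_discrete.estimator_errors_) == bdt_discrete.n_estimators
assert n_trees_discrete <= bdt_discrete.n_estimators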
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
ebothmann/seaborn | seaborn/tests/test_matrix.py | 2 | 28570 | import itertools
import tempfile
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from .. import matrix as mat
from .. import color_palette
from ..external.six.moves import range
try:
import fastcluster
assert fastcluster
_no_fastcluster = False
except ImportError:
_no_fastcluster = True
class TestHeatmap(object):
rs = np.random.RandomState(sum(map(ord, "heatmap")))
x_norm = rs.randn(4, 8)
letters = pd.Series(["A", "B", "C", "D"], name="letters")
df_norm = pd.DataFrame(x_norm, index=letters)
x_unif = rs.rand(20, 13)
df_unif = pd.DataFrame(x_unif)
default_kws = dict(vmin=None, vmax=None, cmap=None, center=None,
robust=False, annot=False, fmt=".2f", annot_kws=None,
cbar=True, cbar_kws=None, mask=None)
def test_ndarray_input(self):
p = mat._HeatMapper(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm[::-1])
pdt.assert_frame_equal(p.data, pd.DataFrame(self.x_norm).ix[::-1])
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, np.arange(4)[::-1])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "")
def test_df_input(self):
p = mat._HeatMapper(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.plot_data, self.x_norm[::-1])
pdt.assert_frame_equal(p.data, self.df_norm.ix[::-1])
npt.assert_array_equal(p.xticklabels, np.arange(8))
npt.assert_array_equal(p.yticklabels, ["D", "C", "B", "A"])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "letters")
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
p = mat._HeatMapper(df, **self.default_kws)
npt.assert_array_equal(p.yticklabels, ["D-4", "C-3", "B-2", "A-1"])
nt.assert_equal(p.ylabel, "letter-number")
p = mat._HeatMapper(df.T, **self.default_kws)
npt.assert_array_equal(p.xticklabels, ["A-1", "B-2", "C-3", "D-4"])
nt.assert_equal(p.xlabel, "letter-number")
def test_mask_input(self):
kws = self.default_kws.copy()
mask = self.x_norm > 0
kws['mask'] = mask
p = mat._HeatMapper(self.x_norm, **kws)
plot_data = np.ma.masked_where(mask, self.x_norm)
npt.assert_array_equal(p.plot_data, plot_data[::-1])
def test_default_sequential_vlims(self):
p = mat._HeatMapper(self.df_unif, **self.default_kws)
nt.assert_equal(p.vmin, self.x_unif.min())
nt.assert_equal(p.vmax, self.x_unif.max())
nt.assert_true(not p.divergent)
def test_default_diverging_vlims(self):
p = mat._HeatMapper(self.df_norm, **self.default_kws)
vlim = max(abs(self.x_norm.min()), abs(self.x_norm.max()))
nt.assert_equal(p.vmin, -vlim)
nt.assert_equal(p.vmax, vlim)
nt.assert_true(p.divergent)
def test_robust_sequential_vlims(self):
kws = self.default_kws.copy()
kws["robust"] = True
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, np.percentile(self.x_unif, 2))
nt.assert_equal(p.vmax, np.percentile(self.x_unif, 98))
def test_custom_sequential_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = 0
kws["vmax"] = 1
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.vmin, 0)
nt.assert_equal(p.vmax, 1)
def test_custom_diverging_vlims(self):
kws = self.default_kws.copy()
kws["vmin"] = -4
kws["vmax"] = 5
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.vmin, -5)
nt.assert_equal(p.vmax, 5)
def test_array_with_nans(self):
x1 = self.rs.rand(10, 10)
nulls = np.zeros(10) * np.nan
x2 = np.c_[x1, nulls]
m1 = mat._HeatMapper(x1, **self.default_kws)
m2 = mat._HeatMapper(x2, **self.default_kws)
nt.assert_equal(m1.vmin, m2.vmin)
nt.assert_equal(m1.vmax, m2.vmax)
def test_mask(self):
df = pd.DataFrame(data={'a': [1, 1, 1],
'b': [2, np.nan, 2],
'c': [3, 3, np.nan]})
kws = self.default_kws.copy()
kws["mask"] = np.isnan(df.values)
m = mat._HeatMapper(df, **kws)
npt.assert_array_equal(np.isnan(m.plot_data.data),
m.plot_data.mask)
def test_custom_cmap(self):
kws = self.default_kws.copy()
kws["cmap"] = "BuGn"
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_equal(p.cmap, "BuGn")
def test_centered_vlims(self):
kws = self.default_kws.copy()
kws["center"] = .5
p = mat._HeatMapper(self.df_unif, **kws)
nt.assert_true(p.divergent)
nt.assert_equal(p.vmax - .5, .5 - p.vmin)
def test_tickabels_off(self):
kws = self.default_kws.copy()
kws['xticklabels'] = False
kws['yticklabels'] = False
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.xticklabels, ['' for _ in range(
self.df_norm.shape[1])])
nt.assert_equal(p.yticklabels, ['' for _ in range(
self.df_norm.shape[0])])
def test_custom_ticklabels(self):
kws = self.default_kws.copy()
xticklabels = list('iheartheatmaps'[:self.df_norm.shape[1]])
yticklabels = list('heatmapsarecool'[:self.df_norm.shape[0]])
kws['xticklabels'] = xticklabels
kws['yticklabels'] = yticklabels
p = mat._HeatMapper(self.df_norm, **kws)
nt.assert_equal(p.xticklabels, xticklabels)
nt.assert_equal(p.yticklabels, yticklabels[::-1])
def test_heatmap_annotation(self):
ax = mat.heatmap(self.df_norm, annot=True, fmt=".1f",
annot_kws={"fontsize": 14})
for val, text in zip(self.x_norm[::-1].flat, ax.texts):
nt.assert_equal(text.get_text(), "{:.1f}".format(val))
nt.assert_equal(text.get_fontsize(), 14)
def test_heatmap_cbar(self):
f = plt.figure()
mat.heatmap(self.df_norm)
nt.assert_equal(len(f.axes), 2)
plt.close(f)
f = plt.figure()
mat.heatmap(self.df_norm, cbar=False)
nt.assert_equal(len(f.axes), 1)
plt.close(f)
f, (ax1, ax2) = plt.subplots(2)
mat.heatmap(self.df_norm, ax=ax1, cbar_ax=ax2)
nt.assert_equal(len(f.axes), 2)
plt.close(f)
def test_heatmap_axes(self):
ax = mat.heatmap(self.df_norm)
xtl = [int(l.get_text()) for l in ax.get_xticklabels()]
nt.assert_equal(xtl, list(self.df_norm.columns))
ytl = [l.get_text() for l in ax.get_yticklabels()]
nt.assert_equal(ytl, list(self.df_norm.index[::-1]))
nt.assert_equal(ax.get_xlabel(), "")
nt.assert_equal(ax.get_ylabel(), "letters")
nt.assert_equal(ax.get_xlim(), (0, 8))
nt.assert_equal(ax.get_ylim(), (0, 4))
plt.close("all")
def test_heatmap_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(self.df_norm, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 0)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 90)
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.heatmap(df, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 90)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
def test_heatmap_inner_lines(self):
c = (0, 0, 1, 1)
ax = mat.heatmap(self.df_norm, linewidths=2, linecolor=c)
mesh = ax.collections[0]
nt.assert_equal(mesh.get_linewidths()[0], 2)
nt.assert_equal(tuple(mesh.get_edgecolor()[0]), c)
plt.close("all")
def test_square_aspect(self):
ax = mat.heatmap(self.df_norm, square=True)
nt.assert_equal(ax.get_aspect(), "equal")
plt.close("all")
class TestDendrogram(object):
rs = np.random.RandomState(sum(map(ord, "dendrogram")))
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
try:
import fastcluster
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
except ImportError:
x_norm_distances = distance.squareform(
distance.pdist(x_norm.T, metric='euclidean'))
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_list=['k'],
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
default_kws = dict(linkage=None, metric='euclidean', method='single',
axis=1, label=True, rotate=False)
def test_ndarray_input(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, self.x_norm)
pdt.assert_frame_equal(p.data.T, pd.DataFrame(self.x_norm))
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.reordered_ind, self.x_norm_leaves)
npt.assert_array_equal(p.xticklabels, self.x_norm_leaves)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, None)
nt.assert_equal(p.ylabel, '')
def test_df_input(self):
p = mat._DendrogramPlotter(self.df_norm, **self.default_kws)
npt.assert_array_equal(p.array.T, np.asarray(self.df_norm))
pdt.assert_frame_equal(p.data.T, self.df_norm)
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.xticklabels,
np.asarray(self.df_norm.columns)[
self.x_norm_leaves])
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, 'letters')
nt.assert_equal(p.ylabel, '')
def test_df_multindex_input(self):
df = self.df_norm.copy()
index = pd.MultiIndex.from_tuples([("A", 1), ("B", 2),
("C", 3), ("D", 4)],
names=["letter", "number"])
index.name = "letter-number"
df.index = index
kws = self.default_kws.copy()
kws['label'] = True
p = mat._DendrogramPlotter(df.T, **kws)
xticklabels = ["A-1", "B-2", "C-3", "D-4"]
xticklabels = [xticklabels[i] for i in p.reordered_ind]
npt.assert_array_equal(p.xticklabels, xticklabels)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, "letter-number")
def test_axis0_input(self):
kws = self.default_kws.copy()
kws['axis'] = 0
p = mat._DendrogramPlotter(self.df_norm.T, **kws)
npt.assert_array_equal(p.array, np.asarray(self.df_norm.T))
pdt.assert_frame_equal(p.data, self.df_norm.T)
npt.assert_array_equal(p.linkage, self.x_norm_linkage)
nt.assert_dict_equal(p.dendrogram, self.x_norm_dendrogram)
npt.assert_array_equal(p.xticklabels, self.df_norm_leaves)
npt.assert_array_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, 'letters')
nt.assert_equal(p.ylabel, '')
def test_rotate_input(self):
kws = self.default_kws.copy()
kws['rotate'] = True
p = mat._DendrogramPlotter(self.df_norm, **kws)
npt.assert_array_equal(p.array.T, np.asarray(self.df_norm))
pdt.assert_frame_equal(p.data.T, self.df_norm)
npt.assert_array_equal(p.xticklabels, [])
npt.assert_array_equal(p.yticklabels, self.df_norm_leaves)
nt.assert_equal(p.xlabel, '')
nt.assert_equal(p.ylabel, 'letters')
def test_rotate_axis0_input(self):
kws = self.default_kws.copy()
kws['rotate'] = True
kws['axis'] = 0
p = mat._DendrogramPlotter(self.df_norm.T, **kws)
npt.assert_array_equal(p.reordered_ind, self.x_norm_leaves)
def test_custom_linkage(self):
kws = self.default_kws.copy()
try:
import fastcluster
linkage = fastcluster.linkage_vector(self.x_norm, method='single',
metric='euclidean')
except ImportError:
d = distance.squareform(distance.pdist(self.x_norm,
metric='euclidean'))
linkage = hierarchy.linkage(d, method='single')
dendrogram = hierarchy.dendrogram(linkage, no_plot=True,
color_list=['k'],
color_threshold=-np.inf)
kws['linkage'] = linkage
p = mat._DendrogramPlotter(self.df_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
nt.assert_dict_equal(p.dendrogram, dendrogram)
def test_label_false(self):
kws = self.default_kws.copy()
kws['label'] = False
p = mat._DendrogramPlotter(self.df_norm, **kws)
nt.assert_equal(p.xticks, [])
nt.assert_equal(p.yticks, [])
nt.assert_equal(p.xticklabels, [])
nt.assert_equal(p.yticklabels, [])
nt.assert_equal(p.xlabel, "")
nt.assert_equal(p.ylabel, "")
def test_linkage_scipy(self):
p = mat._DendrogramPlotter(self.x_norm, **self.default_kws)
scipy_linkage = p._calculate_linkage_scipy()
from scipy.spatial import distance
from scipy.cluster import hierarchy
dists = distance.squareform(distance.pdist(self.x_norm.T,
metric=self.default_kws[
'metric']))
linkage = hierarchy.linkage(dists, method=self.default_kws['method'])
npt.assert_array_equal(scipy_linkage, linkage)
@skipif(_no_fastcluster)
def test_fastcluster_other_method(self):
import fastcluster
kws = self.default_kws.copy()
kws['method'] = 'average'
linkage = fastcluster.linkage(self.x_norm.T, method='average',
metric='euclidean')
p = mat._DendrogramPlotter(self.x_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
@skipif(_no_fastcluster)
def test_fastcluster_non_euclidean(self):
import fastcluster
kws = self.default_kws.copy()
kws['metric'] = 'cosine'
kws['method'] = 'average'
linkage = fastcluster.linkage(self.x_norm.T, method=kws['method'],
metric=kws['metric'])
p = mat._DendrogramPlotter(self.x_norm, **kws)
npt.assert_array_equal(p.linkage, linkage)
def test_dendrogram_plot(self):
d = mat.dendrogram(self.x_norm, **self.default_kws)
ax = plt.gca()
d.xmin, d.xmax = ax.get_xlim()
xmax = min(map(min, d.X)) + max(map(max, d.X))
nt.assert_equal(d.xmin, 0)
nt.assert_equal(d.xmax, xmax)
nt.assert_equal(len(ax.get_lines()), len(d.X))
nt.assert_equal(len(ax.get_lines()), len(d.Y))
plt.close('all')
def test_dendrogram_rotate(self):
kws = self.default_kws.copy()
kws['rotate'] = True
d = mat.dendrogram(self.x_norm, **kws)
ax = plt.gca()
d.ymin, d.ymax = ax.get_ylim()
ymax = min(map(min, d.Y)) + max(map(max, d.Y))
nt.assert_equal(d.ymin, 0)
nt.assert_equal(d.ymax, ymax)
plt.close('all')
def test_dendrogram_ticklabel_rotation(self):
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(self.df_norm, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
df = self.df_norm.copy()
df.columns = [str(c) * 10 for c in df.columns]
df.index = [i * 10 for i in df.index]
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(df, ax=ax)
for t in ax.get_xticklabels():
nt.assert_equal(t.get_rotation(), 90)
plt.close(f)
f, ax = plt.subplots(figsize=(2, 2))
mat.dendrogram(df.T, axis=0, rotate=True)
for t in ax.get_yticklabels():
nt.assert_equal(t.get_rotation(), 0)
plt.close(f)
class TestClustermap(object):
rs = np.random.RandomState(sum(map(ord, "clustermap")))
x_norm = rs.randn(4, 8) + np.arange(8)
x_norm = (x_norm.T + np.arange(4)).T
letters = pd.Series(["A", "B", "C", "D", "E", "F", "G", "H"],
name="letters")
df_norm = pd.DataFrame(x_norm, columns=letters)
try:
import fastcluster
x_norm_linkage = fastcluster.linkage_vector(x_norm.T,
metric='euclidean',
method='single')
except ImportError:
x_norm_distances = distance.squareform(
distance.pdist(x_norm.T, metric='euclidean'))
x_norm_linkage = hierarchy.linkage(x_norm_distances, method='single')
x_norm_dendrogram = hierarchy.dendrogram(x_norm_linkage, no_plot=True,
color_list=['k'],
color_threshold=-np.inf)
x_norm_leaves = x_norm_dendrogram['leaves']
df_norm_leaves = np.asarray(df_norm.columns[x_norm_leaves])
default_kws = dict(pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None)
default_plot_kws = dict(metric='euclidean', method='average',
colorbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None, mask=None)
row_colors = color_palette('Set2', df_norm.shape[0])
col_colors = color_palette('Dark2', df_norm.shape[1])
def test_ndarray_input(self):
cm = mat.ClusterGrid(self.x_norm, **self.default_kws)
pdt.assert_frame_equal(cm.data, pd.DataFrame(self.x_norm))
nt.assert_equal(len(cm.fig.axes), 4)
nt.assert_equal(cm.ax_row_colors, None)
nt.assert_equal(cm.ax_col_colors, None)
plt.close('all')
def test_df_input(self):
cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
pdt.assert_frame_equal(cm.data, self.df_norm)
plt.close('all')
def test_corr_df_input(self):
df = self.df_norm.corr()
cg = mat.ClusterGrid(df, **self.default_kws)
cg.plot(**self.default_plot_kws)
diag = cg.data2d.values[np.diag_indices_from(cg.data2d)]
npt.assert_array_equal(diag, np.ones(cg.data2d.shape[0]))
plt.close('all')
def test_pivot_input(self):
df_norm = self.df_norm.copy()
df_norm.index.name = 'numbers'
df_long = pd.melt(df_norm.reset_index(), var_name='letters',
id_vars='numbers')
kws = self.default_kws.copy()
kws['pivot_kws'] = dict(index='numbers', columns='letters',
values='value')
cm = mat.ClusterGrid(df_long, **kws)
pdt.assert_frame_equal(cm.data2d, df_norm)
plt.close('all')
def test_colors_input(self):
kws = self.default_kws.copy()
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.ClusterGrid(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, self.row_colors)
npt.assert_array_equal(cm.col_colors, self.col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
plt.close('all')
def test_nested_colors_input(self):
kws = self.default_kws.copy()
row_colors = [self.row_colors, self.row_colors]
col_colors = [self.col_colors, self.col_colors]
kws['row_colors'] = row_colors
kws['col_colors'] = col_colors
cm = mat.ClusterGrid(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, row_colors)
npt.assert_array_equal(cm.col_colors, col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
plt.close('all')
def test_colors_input_custom_cmap(self):
kws = self.default_kws.copy()
kws['cmap'] = mpl.cm.PRGn
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
npt.assert_array_equal(cm.row_colors, self.row_colors)
npt.assert_array_equal(cm.col_colors, self.col_colors)
nt.assert_equal(len(cm.fig.axes), 6)
plt.close('all')
def test_z_score(self):
df = self.df_norm.copy()
df = (df - df.mean()) / df.var()
kws = self.default_kws.copy()
kws['z_score'] = 1
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_z_score_axis0(self):
df = self.df_norm.copy()
df = df.T
df = (df - df.mean()) / df.var()
df = df.T
kws = self.default_kws.copy()
kws['z_score'] = 0
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_standard_scale(self):
df = self.df_norm.copy()
df = (df - df.min()) / (df.max() - df.min())
kws = self.default_kws.copy()
kws['standard_scale'] = 1
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_standard_scale_axis0(self):
df = self.df_norm.copy()
df = df.T
df = (df - df.min()) / (df.max() - df.min())
df = df.T
kws = self.default_kws.copy()
kws['standard_scale'] = 0
cm = mat.ClusterGrid(self.df_norm, **kws)
pdt.assert_frame_equal(cm.data2d, df)
plt.close('all')
def test_z_score_standard_scale(self):
kws = self.default_kws.copy()
kws['z_score'] = True
kws['standard_scale'] = True
with nt.assert_raises(ValueError):
cm = mat.ClusterGrid(self.df_norm, **kws)
plt.close('all')
def test_color_list_to_matrix_and_cmap(self):
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
self.col_colors, self.x_norm_leaves)
colors_set = set(self.col_colors)
col_to_value = dict((col, i) for i, col in enumerate(colors_set))
matrix_test = np.array([col_to_value[col] for col in
self.col_colors])[self.x_norm_leaves]
shape = len(self.col_colors), 1
matrix_test = matrix_test.reshape(shape)
cmap_test = mpl.colors.ListedColormap(colors_set)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
plt.close('all')
def test_nested_color_list_to_matrix_and_cmap(self):
colors = [self.col_colors, self.col_colors]
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
colors, self.x_norm_leaves)
all_colors = set(itertools.chain(*colors))
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix_test = np.array(
[color_to_value[c] for color in colors for c in color])
shape = len(colors), len(colors[0])
matrix_test = matrix_test.reshape(shape)
matrix_test = matrix_test[:, self.x_norm_leaves]
matrix_test = matrix_test.T
cmap_test = mpl.colors.ListedColormap(all_colors)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
plt.close('all')
def test_color_list_to_matrix_and_cmap_axis1(self):
matrix, cmap = mat.ClusterGrid.color_list_to_matrix_and_cmap(
self.col_colors, self.x_norm_leaves, axis=1)
colors_set = set(self.col_colors)
col_to_value = dict((col, i) for i, col in enumerate(colors_set))
matrix_test = np.array([col_to_value[col] for col in
self.col_colors])[self.x_norm_leaves]
shape = 1, len(self.col_colors)
matrix_test = matrix_test.reshape(shape)
cmap_test = mpl.colors.ListedColormap(colors_set)
npt.assert_array_equal(matrix, matrix_test)
npt.assert_array_equal(cmap.colors, cmap_test.colors)
plt.close('all')
def test_savefig(self):
# Not sure if this is the right way to test....
cm = mat.ClusterGrid(self.df_norm, **self.default_kws)
cm.plot(**self.default_plot_kws)
cm.savefig(tempfile.NamedTemporaryFile(), format='png')
plt.close('all')
def test_plot_dendrograms(self):
cm = mat.clustermap(self.df_norm, **self.default_kws)
nt.assert_equal(len(cm.ax_row_dendrogram.get_lines()),
len(cm.dendrogram_row.X))
nt.assert_equal(len(cm.ax_col_dendrogram.get_lines()),
len(cm.dendrogram_col.X))
data2d = self.df_norm.iloc[cm.dendrogram_row.reordered_ind,
cm.dendrogram_col.reordered_ind]
pdt.assert_frame_equal(cm.data2d, data2d)
plt.close('all')
def test_cluster_false(self):
kws = self.default_kws.copy()
kws['row_cluster'] = False
kws['col_cluster'] = False
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_yticks()), 0)
pdt.assert_frame_equal(cm.data2d, self.df_norm)
plt.close('all')
def test_row_col_colors(self):
kws = self.default_kws.copy()
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_colors.collections), 1)
nt.assert_equal(len(cm.ax_col_colors.collections), 1)
plt.close('all')
def test_cluster_false_row_col_colors(self):
kws = self.default_kws.copy()
kws['row_cluster'] = False
kws['col_cluster'] = False
kws['row_colors'] = self.row_colors
kws['col_colors'] = self.col_colors
cm = mat.clustermap(self.df_norm, **kws)
nt.assert_equal(len(cm.ax_row_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.lines), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_row_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_xticks()), 0)
nt.assert_equal(len(cm.ax_col_dendrogram.get_yticks()), 0)
nt.assert_equal(len(cm.ax_row_colors.collections), 1)
nt.assert_equal(len(cm.ax_col_colors.collections), 1)
pdt.assert_frame_equal(cm.data2d, self.df_norm)
plt.close('all')
| bsd-3-clause |
ArtsiomCh/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined_test.py | 46 | 26964 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
# A function to mimic linear-regressor init reuse same tests.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyClassifierTrainingTest(
linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
# A function to mimic dnn-classifier init reuse same tests.
def _dnn_classifier_fn(hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
# A function to mimic dnn-regressor init reuse same tests.
def _dnn_regressor_fn(hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 2
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes-1., batch_size * input_dimension,
dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, real_optimizer, var_name_prefix):
"""Verifies global_step is None and var_names start with given prefix."""
def _minimize(loss, global_step=None, var_list=None):
self.assertIsNone(global_step)
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
var_names = [var.name for var in trainable_vars]
self.assertTrue(
all([name.startswith(var_name_prefix) for name in var_names]))
# var is used to check this op called by training.
with ops.name_scope(''):
var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
with ops.control_dependencies([var.assign(100.)]):
return real_optimizer.minimize(loss, global_step, var_list)
optimizer_mock = test.mock.NonCallableMagicMock(
spec=optimizer_lib.Optimizer, wraps=real_optimizer)
optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
return optimizer_mock
def test_train_op_calls_both_dnn_and_linear(self):
opt = gradient_descent.GradientDescentOptimizer(1.)
x_column = feature_column.numeric_column('x')
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[0.], [1.]])},
y=np.array([[0.], [1.]]),
batch_size=1,
shuffle=False)
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[x_column],
# verifies linear_optimizer is used only for linear part.
linear_optimizer=self._mock_optimizer(opt, 'linear'),
dnn_hidden_units=(2, 2),
dnn_feature_columns=[x_column],
# verifies dnn_optimizer is used only for dnn part.
dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
model_dir=self._model_dir)
est.train(input_fn, steps=1)
# verifies train_op fires linear minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'linear_called'))
# verifies train_op fires dnn minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'dnn_called'))
def test_dnn_and_linear_logits_are_added(self):
with ops.Graph().as_default():
variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
variables_lib.Variable([6.0], name='dnn/logits/bias')
variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
linear_testing_utils.save_variables_to_ckpt(self._model_dir)
x_column = feature_column.numeric_column('x')
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[x_column],
dnn_hidden_units=[1],
dnn_feature_columns=[x_column],
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
# linear logits = 10*1 + 2 = 12
# dnn logits = (10*3 + 4)*5 + 6 = 176
# logits = dnn + linear = 176 + 12 = 188
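# (Added arithmetic check, not in the original test.) Reproducing the hand
# calculation in the comments above with plain Python:
assert (10. * 1. + 2.) + ((10. * 3. + 4.) * 5. + 6.) == 188.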
self.assertAllClose(
{
prediction_keys.PredictionKeys.PREDICTIONS: [188.],
},
next(est.predict(input_fn=input_fn)))
if __name__ == '__main__':
test.main()
| apache-2.0 |
mprelee/data-incubator-capstone | src/child_cost_investigation.py | 1 | 5797 | # Look at words
# Matt Prelee
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import nltk
import re
from sklearn import base
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.kernel_ridge import KernelRidge
from sklearn.grid_search import GridSearchCV
import time
from scipy import stats
from sklearn.cluster import KMeans
from config import MAIN_DATA
from preprocessing import default_preprocess, remove_outliers_iqr
CHILD_ALL_WORD_COUNT = 'data/child_all_count.txt'
CHILD_1500_WORD_COUNT = 'data/child_1500_count.txt'
CHILD_1000_WORD_COUNT = 'data/child_1000_count.txt'
CHILD_250_WORD_COUNT = 'data/child_250_count.txt'
CHILD_NOT_1500_WORD_COUNT = 'data/child_not_1500_count.txt'
ADULT_1500_WORD_COUNT = 'data/adult_1500_count.txt'
CHILD_COST_HISTOGRAM = 'static/plots/child_cost_hist.png'
ADULT_COST_HISTOGRAM = 'static/plots/adult_cost_hist.png'
ELDERLY_COST_HISTOGRAM = 'static/plots/elderly_cost_hist.png'
# Load data
df = default_preprocess(pickle.load(open(MAIN_DATA,'rb')))
kids = df[df.age_group == 'child']
plt.figure(1)
kids.Cost.hist(bins=150)
plt.xlabel('Operation Cost ($)')
plt.ylabel('Number of child patients')
plt.title('Histogram of child patient operation costs')
plt.savefig(CHILD_COST_HISTOGRAM,format='png')
plt.figure(2)
df[df.age_group == 'adult'].Cost.hist(bins=150)
plt.xlabel('Operation Cost ($)')
plt.ylabel('Number of adult patients')
plt.title('Histogram of adult patient operation costs')
plt.savefig(ADULT_COST_HISTOGRAM,format='png')
plt.figure(3)
df[df.age_group=='elderly'].Cost.hist(bins=150)
plt.xlabel('Operation Cost ($)')
plt.ylabel('Number of elderly patients')
plt.title('Histogram of elderly patient operation costs')
plt.savefig(ELDERLY_COST_HISTOGRAM,format='png')
kids1500 = kids[kids.Cost==1500]
kids1000 = kids[(kids.Cost>940) & (kids.Cost < 1020)]
kids250 = kids[(kids.Cost > 0) & (kids.Cost < 310)]
kids_not_1500 = kids[kids.Cost!=1500]
#print(kids1500['fund_time'].value_counts())
#print(kids_not_1500['fund_time'].value_counts())
#print('Compare adult fund times with heart disease')
#print(df[(df.age_group=='adult') & (df.Cost == 1500.)]['fund_time'].mean())
#print(df[(df.age_group=='adult') & (df.Cost != 1500.)]['fund_time'].mean())
print('Number of children costing $1500 is %d' % len(kids1500))
print('Number of children costing $1000 is %d' % len(kids1000))
print('Number of children costing $250 is %d' % len(kids250))
count_vectorizer_all = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,1))
count_vectorizer_1500 = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,1))
count_vectorizer_1000 = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,1))
count_vectorizer_250 = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,1))
count_vectorizer_not_1500 = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,1))
adult_1500_vect = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,1))
all_vect = CountVectorizer(strip_accents='unicode',stop_words='english',ngram_range=(1,2))
print('Vectorizing stories...')
start_time = time.time()
count_mtx_all = count_vectorizer_all.fit_transform(kids['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
print('Vectorizing stories...')
start_time = time.time()
count_mtx_1500 = count_vectorizer_1500.fit_transform(kids1500['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
print('Vectorizing stories...')
start_time = time.time()
count_mtx_1000 = count_vectorizer_1000.fit_transform(kids1000['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
print('Vectorizing stories...')
start_time = time.time()
count_mtx_250 = count_vectorizer_250.fit_transform(kids250['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
print('Vectorizing stories...')
start_time = time.time()
count_mtx_not_1500 = count_vectorizer_not_1500.fit_transform(kids_not_1500['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
print('Vectorizing stories...')
start_time = time.time()
all_mtx = all_vect.fit_transform(df['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
print('Vectorizing stories...')
start_time = time.time()
adult_1500_mtx = adult_1500_vect.fit_transform(df[df.age_group=='adult']['story'].values)
#adult_1500_mtx = adult_1500_vect.fit_transform(df[(df.age_group=='adult') & (df.gender=='F')]['story'].values)
end_time = time.time()
print('Vectorized in %d seconds.' % (end_time-start_time))
def count_and_write(mtx,vec,file) :
# Map counts
counts = mtx.sum(axis=0).T
count_dict = dict((key,int(val)) for key,val in zip(vec.get_feature_names(),counts))
print('\'heart\' appears %d times' % count_dict['heart'])
with open(file,'w') as f:
for key in count_dict.keys() :
f.write(str(key) + ':'+ str(count_dict[key])+'\n')
count_and_write(count_mtx_all,count_vectorizer_all,CHILD_ALL_WORD_COUNT)
count_and_write(count_mtx_1500,count_vectorizer_1500,CHILD_1500_WORD_COUNT)
count_and_write(count_mtx_1000,count_vectorizer_1000,CHILD_1000_WORD_COUNT)
count_and_write(count_mtx_250,count_vectorizer_250,CHILD_250_WORD_COUNT)
count_and_write(count_mtx_not_1500,count_vectorizer_250,CHILD_NOT_1500_WORD_COUNT)
count_and_write(adult_1500_mtx,adult_1500_vect,ADULT_1500_WORD_COUNT)
| gpl-2.0 |
herilalaina/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
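# (Added sketch, not part of the original example.) Ridge minimizes
# ||y - Xw||^2 + alpha * ||w||^2, so the L2 penalty shrinks the fitted slope
# toward zero relative to ordinary least squares. A quick check on the same
# two training points used below (alpha=.1 matches the classifiers dict):
_ols_demo = linear_model.LinearRegression().fit(np.c_[.5, 1].T, [.5, 1])
_ridge_demo = linear_model.Ridge(alpha=.1).fit(np.c_[.5, 1].T, [.5, 1])
assert abs(_ridge_demo.coef_[0]) <= abs(_ols_demo.coef_[0])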
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/groupby/test_bin_groupby.py | 5 | 5046 | # -*- coding: utf-8 -*-
import pytest
from numpy import nan
import numpy as np
from pandas.core.dtypes.common import ensure_int64
from pandas import Index, isna
from pandas.core.groupby.ops import generate_bins_generic
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
from pandas._libs import lib, groupby, reduction
def test_series_grouper():
from pandas import Series
obj = Series(np.random.randn(10))
dummy = obj[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
def test_series_bin_grouper():
from pandas import Series
obj = Series(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
class TestBinGroupers(object):
def setup_method(self, method):
self.obj = np.random.randn(10, 1)
self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
self.bins = np.array([3, 6], dtype=np.int64)
def test_generate_bins(self):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6, 9], dtype=np.int64)
for func in [lib.generate_bins_dt64, generate_bins_generic]:
bins = func(values, binner, closed='left')
assert ((bins == np.array([2, 5, 6])).all())
bins = func(values, binner, closed='right')
assert ((bins == np.array([3, 6, 6])).all())
for func in [lib.generate_bins_dt64, generate_bins_generic]:
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6], dtype=np.int64)
bins = func(values, binner, closed='right')
assert ((bins == np.array([3, 6])).all())
pytest.raises(ValueError, generate_bins_generic, values, [],
'right')
pytest.raises(ValueError, generate_bins_generic, values[:0],
binner, 'right')
pytest.raises(ValueError, generate_bins_generic, values, [4],
'right')
pytest.raises(ValueError, generate_bins_generic, values, [-3, -1],
'right')
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(len(out), dtype=np.int64)
labels = ensure_int64(np.repeat(np.arange(3),
np.diff(np.r_[0, bins])))
func = getattr(groupby, 'group_ohlc_%s' % dtype)
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if isna(group).all():
return np.repeat(nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]),
_ohlc(obj[12:])])
assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts,
np.array([6, 6, 8], dtype=np.int64))
obj[:6] = nan
func(out, counts, obj[:, None], labels)
expected[0] = nan
assert_almost_equal(out, expected)
_check('float32')
_check('float64')
class TestMoments(object):
pass
class TestReducer(object):
def test_int_index(self):
from pandas.core.series import Series
arr = np.random.randn(100, 4)
result = reduction.reduce(arr, np.sum, labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
result = reduction.reduce(arr, np.sum, axis=1,
labels=Index(np.arange(100)))
expected = arr.sum(1)
assert_almost_equal(result, expected)
dummy = Series(0., index=np.arange(100))
result = reduction.reduce(arr, np.sum, dummy=dummy,
labels=Index(np.arange(4)))
expected = arr.sum(0)
assert_almost_equal(result, expected)
dummy = Series(0., index=np.arange(4))
result = reduction.reduce(arr, np.sum, axis=1, dummy=dummy,
labels=Index(np.arange(100)))
expected = arr.sum(1)
assert_almost_equal(result, expected)
result = reduction.reduce(arr, np.sum, axis=1, dummy=dummy,
labels=Index(np.arange(100)))
assert_almost_equal(result, expected)
| bsd-3-clause |
rtavenar/tslearn | tslearn/docs/examples/neighbors/plot_neighbors.py | 1 | 3522 | # -*- coding: utf-8 -*-
"""
Nearest neighbors
=================
This example illustrates the use of nearest neighbor methods for database
search and classification tasks.
The three-nearest neighbors of the time series from a test set are computed.
Then, the predictive performance of a three-nearest neighbors classifier [1] is
computed with three different metrics: Dynamic Time Warping [2], Euclidean
distance and SAX-MINDIST [3].
[1] `Wikipedia entry for the k-nearest neighbors algorithm
<https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm>`_
[2] H. Sakoe and S. Chiba, "Dynamic programming algorithm optimization
for spoken word recognition". IEEE Transactions on Acoustics, Speech, and
Signal Processing, 26(1), 43-49 (1978).
[3] J. Lin, E. Keogh, L. Wei and S. Lonardi, "Experiencing SAX: a novel
symbolic representation of time series". Data Mining and Knowledge Discovery,
15(2), 107-144 (2007).
"""
# Author: Romain Tavenard
# License: BSD 3 clause
import numpy
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from tslearn.generators import random_walk_blobs
from tslearn.preprocessing import TimeSeriesScalerMinMax
from tslearn.neighbors import KNeighborsTimeSeriesClassifier, \
KNeighborsTimeSeries
from tslearn.piecewise import SymbolicAggregateApproximation
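# Illustrative sketch (an assumption for exposition, not tslearn's own
# implementation): the DTW distance of [2] can be computed with the classic
# dynamic-programming recurrence
#   D[i, j] = cost(i, j) + min(D[i-1, j], D[i, j-1], D[i-1, j-1]).
def _dtw_sketch(s1, s2):
    s1 = numpy.asarray(s1, dtype=float)
    s2 = numpy.asarray(s2, dtype=float)
    n, m = len(s1), len(s2)
    acc = numpy.full((n + 1, m + 1), numpy.inf)
    acc[0, 0] = 0.
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = (s1[i - 1] - s2[j - 1]) ** 2
            acc[i, j] = cost + min(acc[i - 1, j], acc[i, j - 1],
                                   acc[i - 1, j - 1])
    return numpy.sqrt(acc[n, m])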
numpy.random.seed(0)
n_ts_per_blob, sz, d, n_blobs = 20, 100, 1, 2
# Prepare data
X, y = random_walk_blobs(n_ts_per_blob=n_ts_per_blob,
sz=sz,
d=d,
n_blobs=n_blobs)
scaler = TimeSeriesScalerMinMax(value_range=(0., 1.)) # Rescale time series
X_scaled = scaler.fit_transform(X)
indices_shuffle = numpy.random.permutation(n_ts_per_blob * n_blobs)
X_shuffle = X_scaled[indices_shuffle]
y_shuffle = y[indices_shuffle]
X_train = X_shuffle[:n_ts_per_blob * n_blobs // 2]
X_test = X_shuffle[n_ts_per_blob * n_blobs // 2:]
y_train = y_shuffle[:n_ts_per_blob * n_blobs // 2]
y_test = y_shuffle[n_ts_per_blob * n_blobs // 2:]
# Nearest neighbor search
knn = KNeighborsTimeSeries(n_neighbors=3, metric="dtw")
knn.fit(X_train, y_train)
dists, ind = knn.kneighbors(X_test)
print("1. Nearest neighbour search")
print("Computed nearest neighbor indices (wrt DTW)\n", ind)
print("First nearest neighbor class:", y_test[ind[:, 0]])
# Nearest neighbor classification
knn_clf = KNeighborsTimeSeriesClassifier(n_neighbors=3, metric="dtw")
knn_clf.fit(X_train, y_train)
predicted_labels = knn_clf.predict(X_test)
print("\n2. Nearest neighbor classification using DTW")
print("Correct classification rate:", accuracy_score(y_test, predicted_labels))
# Nearest neighbor classification with a different metric (Euclidean distance)
knn_clf = KNeighborsTimeSeriesClassifier(n_neighbors=3, metric="euclidean")
knn_clf.fit(X_train, y_train)
predicted_labels = knn_clf.predict(X_test)
print("\n3. Nearest neighbor classification using L2")
print("Correct classification rate:", accuracy_score(y_test, predicted_labels))
# Nearest neighbor classification based on SAX representation
sax_trans = SymbolicAggregateApproximation(n_segments=10, alphabet_size_avg=5)
knn_clf = KNeighborsTimeSeriesClassifier(n_neighbors=3, metric="euclidean")
pipeline_model = Pipeline(steps=[('sax', sax_trans), ('knn', knn_clf)])
pipeline_model.fit(X_train, y_train)
predicted_labels = pipeline_model.predict(X_test)
print("\n4. Nearest neighbor classification using SAX+MINDIST")
print("Correct classification rate:", accuracy_score(y_test, predicted_labels))
| bsd-2-clause |
martinggww/lucasenlights | ETF/lucas/src/getDf.py | 2 | 15125 | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import my_config as config
import numpy as np
import talib as ta
import pandas as pd
import logging, json
STICKERS = config.STICKERS
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
#Append a dict as a row in df
def appendDf(stickers, df, dict,data_frame):
entry = [dict['DATE']]
values = []
for sticker in stickers:
if sticker == 'CASH':
continue
entry.append(dict[sticker])
if data_frame == 'daily':
entry.append(dict['FUND_PERF'])
df.loc[df.shape[0]] = entry
return df
#Must keep the same sequence: DATE, xxx_daily, yyy_daily, ..., FUND_PERF
def getQuantDfColumnNames(stickers, data_frame):
column_names = []
column_names.append('DATE')
for sticker in stickers:
if sticker == 'CASH':
continue
column_names.append(sticker + '_' + data_frame)
if data_frame == 'daily':
column_names.append('FUND_PERF')
return column_names
#Return FeatureDFColumnNames
def getFeatureDfColumnNames(stickers, features, rank_features):
column_names = []
for sticker in stickers:
if sticker == 'CASH':
continue
for feature in features.values():
column_names.append(sticker + feature)
column_names.append(rank_features['_KD_RANK'])
column_names.append(rank_features['_MFI_RANK'])
return column_names
#These are the column names for the data frame
def getDfFeatures(data_frame):
FEATURES = config.FEATURES
RANK_FEATURES = config.RANK_FEATURES
if data_frame == 'weekly':
FEATURES = config.WEEKLY_FEATURES
RANK_FEATURES = config.WEEKLY_RANK_FEATURES
elif data_frame == 'monthly':
FEATURES = config.MONTHLY_FEATURES
RANK_FEATURES = config.MONTHLY_RANK_FEATURES
return FEATURES, RANK_FEATURES
def getStatFeatures(data_frame):
FEATURES = config.STAT_FEATURES
RANK_FEATURES = config.RANK_FEATURES
if data_frame == 'weekly':
FEATURES = config.WEEKLY_STAT_FEATURES
RANK_FEATURES = config.WEEKLY_RANK_FEATURES
elif data_frame == 'monthly':
FEATURES = config.MONTHLY_STAT_FEATURES
RANK_FEATURES = config.MONTHLY_RANK_FEATURES
return FEATURES, RANK_FEATURES
def getFeatureDf():
d, w, m = readCsvFiles()
    # TODO: add logic to check the most recent data
d, d_hist_mom = getDailyFeatureDf(d)
w, w_hist_mom = getWeeklyFeatureDf(w)
d = d.dropna()
w = w.dropna()
d = d.reset_index(drop=True)
w = w.reset_index(drop=True)
return d, w, None
'''
input:
@sticker:string, sticker name
output:
@df:data frame.
Read the csv file as a data frame: 1) drop open, high, low and close,
keeping adjusted close and volume; 2) add 3 more columns:
xxx_st_slope_momentum, xxx_lt_slope_momentum, xxx_sm_slope_momentum;
3) set those three columns to zero
'''
def readCsv(sticker, time_frame):
file_name = None
if time_frame == "daily":
file_name = '../csv/' + sticker + '.csv'
# timestamp, open, high, low, close, adjusted_close, volume, dividend_amount, split_coefficient
df = pd.read_csv(file_name, sep=',', header=0, names=['date', sticker + '_open', sticker + '_high', \
sticker + '_low', sticker + '_org_close', \
sticker + '_close', \
sticker + '_volume', sticker + '_dividend_amount', \
sticker + '_split_coefficient'])
return df
elif time_frame == "weekly":
file_name = '../csv/' + sticker + '.weekly' + '.csv'
# timestamp, open, high, low, close, adjusted close, volume, dividend amount
df = pd.read_csv(file_name, sep=',', header=0, names=['date', sticker + '_open', sticker + '_high', \
sticker + '_low', sticker + '_org_close', \
sticker + '_close', \
sticker + '_volume', sticker + '_dividend_amount'])
return df
return None
# Read the original .csv dataframe
def readCsvFilesTimeFrame(TimeFrame):
dfs = []
#Read all symbol's trade data
for sticker in STICKERS:
df = readCsv(sticker, TimeFrame)
dfs.append(df)
df = dfs[0]
for idx in xrange(1, len(dfs), 1):
_ = dfs[idx]
df = pd.merge(df, _, on='date', how='outer')
# Reverse the dataframe
df = df.iloc[::-1]
df = df.dropna()
df.drop(df.index[[0, 1]], inplace=True)
df = df.reset_index(drop=True)
return df
'''
Read original .csv files, daily, weekly and monthly
'''
def readCsvFiles():
df = readCsvFilesTimeFrame('daily')
weekly_df = readCsvFilesTimeFrame('weekly')
#monthly_df = readCsvFilesTimeFrame('monthly')
return df, weekly_df, None#monthly_df
'''Get ranks from high to low, descending...'''
def getRanks(df, feature_name):
sm_names = [sticker + feature_name for sticker in STICKERS]
sm_names.append('CASH'+feature_name)
df_sm = df.loc[:, sm_names]
# For each row, sort the values by column name
ranking_list = []
for index, row in df_sm.iterrows():
temp = row.sort_values(ascending=False, kind='quicksort', inplace=False)
values = temp.to_frame().index.values.tolist()
ranking_list.append([item.split('_')[0] for item in values])
return ranking_list
def getDiv(v1, v2):
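    # Element-wise ratio v1[i] / v2[i], falling back to 1.0 where v2[i] is zero.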
result = []
for index in xrange(0, len(v1), 1):
if v2[index] != 0:
result.append(float(v1[index])/v2[index])
else:
result.append(1.0)
return np.array(result)
'''
This function adds feature names to the feature data frame
'''
def AddFeatureToDf(df, data_frame):
# Add the following columns for feature extraction.
FEATURES, RANK_FEATURES = getDfFeatures(data_frame)
feature_columns = getFeatureDfColumnNames(STICKERS, FEATURES, RANK_FEATURES)
feature_df = pd.DataFrame(np.zeros((df.shape[0], len(feature_columns))), columns=feature_columns, dtype=float)
feature_df['date'] = df.date
df = pd.merge(df, feature_df, on='date', how='outer')
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.dropna(how='any', inplace=True)
if df.isnull().any().any():
logger.error("After merge, has null")
exit(1)
#print df.columns.values
#print df.head(10)
return df
def getWeeklyFeatureDf(df):
# Add the following columns for feature extraction.
df = AddFeatureToDf(df, 'weekly')
FEATURES = config.WEEKLY_FEATURES
RANK_FEATURES = config.WEEKLY_RANK_FEATURES
hist_mom_dict = {}
date = np.asarray(df['date'], dtype=str)
total_len = len(date)
for sticker in config.STICKERS:
kd_date, k, d = extractKD(sticker, date, '_weekly_KD.json')
mfi_date, mfi = extractMFI(sticker, date, '_weekly_MFI.json')
k = postProcess(total_len, k)
d = postProcess(total_len, d)
mfi = postProcess(total_len, mfi)
# get high, low, close and volume
close = np.asarray(df[sticker + '_close'], dtype=float)
#KD
_column_kd = sticker + FEATURES['_KD']
temp = (d + k)/2.0
df[_column_kd] = temp
_ssto_slope = ta.LINEARREG_SLOPE(temp, timeperiod=10)
#KD_SLOPE
_column_ssto_slope = sticker + FEATURES['_KD_SLOPE']
_ssto_slope[np.isnan(_ssto_slope)] = 0
df[_column_ssto_slope] = _ssto_slope
#MFI SLOPE
_column_mfi = sticker + FEATURES['_MFI']
_column_mfi_slope = sticker + FEATURES['_MFI_SLOPE']
mfi = np.array(mfi)
_mfi_slope = ta.LINEARREG_SLOPE(mfi, timeperiod=10)
_mfi_slope[np.isnan(_mfi_slope)] = 0
df[_column_mfi] = mfi
df[_column_mfi_slope] = _mfi_slope
#ROC
_column_roc = sticker + FEATURES['_ROC']
_roc = ta.ROC(close, timeperiod=10)
_roc[np.isnan(_roc)] = 0
df[_column_roc] = _roc
df['CASH_weekly_MFI'] = 50.0
df['CASH_weekly_K'] = 50.0
df['CASH_weekly_D'] = 50.0
df['CASH_weekly_KD'] = 50.0
df['CASH_weekly_HIST_MOM'] = 1.0
df['CASH_weekly_KD_SLOPE'] = 0.0001
df['CASH_weekly_MFI_SLOPE'] = 0.0001
#From high to low, slope from high to low
kd_rank = getRanks(df, '_weekly_KD_SLOPE')
mfi_rank = getRanks(df, '_weekly_MFI_SLOPE')
df[RANK_FEATURES['_KD_RANK']] = kd_rank
df[RANK_FEATURES['_MFI_RANK']] = mfi_rank
return df, hist_mom_dict
def extractKD(sticker, dates,data_frame):
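    # Load the cached stochastic-oscillator JSON for this sticker and return
    # the SlowK/SlowD series restricted to the given date range.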
kd = {}
file_name = '../csv/' + sticker + data_frame
with open(file_name) as json_data:
kd = json.load(json_data)
dict_list = kd['Technical Analysis: STOCH']
date_list=[]
k = []
d = []
flag = False
for date, value in sorted(dict_list.iteritems()):
if date >= dates[0] and date <= dates[-1]:
date_list.append(date)
k.append(float(value['SlowK']))
d.append(float(value['SlowD']))
return date_list, np.asarray(k), np.asarray(d)
def extractMFI(sticker, dates, data_frame):
file_name = '../csv/' + sticker + data_frame
with open(file_name) as json_data:
mfi = json.load(json_data)
dict_list = mfi['Technical Analysis: MFI']
date_list=[]
mfi = []
flag = False
for date, value in sorted(dict_list.iteritems()):
if date >= dates[0] and date <= dates[-1]:
date_list.append(date)
mfi.append(float(value['MFI']))
return date_list, np.asarray(mfi)
def postProcess(total_len, data):
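    # Left-pad the series with NaN so that its length matches total_len.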
list_len = len(data)
if list_len >= total_len:
return data
result = np.full(total_len - list_len, np.nan)
result = np.concatenate([result, data])
return result
def getDailyFeatureDf(df):
# Add the following columns for feature extraction.
df = AddFeatureToDf(df, 'daily')
FEATURES = config.FEATURES
RANK_FEATURES = config.RANK_FEATURES
hist_mom_dict = {}
date = np.asarray(df['date'], dtype=str)
total_len = len(date)
for sticker in config.STICKERS:
kd_date, k, d = extractKD(sticker, date, '_daily_KD.json')
mfi_date, mfi = extractMFI(sticker, date, '_daily_MFI.json')
k = postProcess(total_len, k)
d = postProcess(total_len, d)
mfi = postProcess(total_len, mfi)
# get high, low, close and volume
close = np.asarray(df[sticker + '_close'], dtype=float)
df[sticker + '_high'] = (df[sticker + '_close'] * df[sticker + '_high']) / df[sticker + '_org_close']
df[sticker + '_low'] = (df[sticker + '_close'] * df[sticker + '_low']) / df[sticker + '_org_close']
#20SMA
_column_20_sma = sticker + FEATURES['_20_SMA']
_20_sma = ta.SMA(close, timeperiod=20)
_20_sma[np.isnan(_20_sma)] = 0.0
df[_column_20_sma] = _20_sma
#50SMA
_column_50_sma = sticker + FEATURES['_50_SMA']
_50_sma = ta.SMA(close, timeperiod=50)
_50_sma[np.isnan(_50_sma)] = 0.0
df[_column_50_sma] = _50_sma
#100 SMA
_column_100_sma = sticker + FEATURES['_100_SMA']
_100_sma = ta.SMA(close, timeperiod=100)
_100_sma[np.isnan(_100_sma)] = 0.0
df[_column_100_sma] = _100_sma
#K,D
_column_kd = sticker + FEATURES['_KD']
temp = (d + k) / 2.0
df[_column_kd] = temp
_ssto_slope = ta.LINEARREG_SLOPE(temp, timeperiod=10)
#KD_SLOPE
_column_ssto_slope = sticker + FEATURES['_KD_SLOPE']
#_ssto_slope[np.isnan(_ssto_slope)] = 0
df[_column_ssto_slope] = _ssto_slope
#MFI
_column_mfi = sticker + FEATURES['_MFI']
_column_mfi_slope = sticker + FEATURES['_MFI_SLOPE']
#MFI_SLOPE
mfi = np.array(mfi)
df[_column_mfi] = mfi
_mfi_slope = ta.LINEARREG_SLOPE(mfi, timeperiod=10)
#_mfi_slope[np.isnan(_mfi_slope)] = 0
df[_column_mfi_slope] = _mfi_slope
#ROC
_column_roc = sticker + FEATURES['_ROC']
_roc = ta.ROC(close, timeperiod=10)
#_roc[np.isnan(_roc)] = 0.0
df[_column_roc] = _roc
#Historical momentum
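        # (product of close-to-20/50/100-day-SMA ratios; values above 1
        # indicate price trading above its moving averages)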
_column_hist_mom = sticker + FEATURES['_HIST_MOM']
if sticker != 'CASH':
_hist_mom = getDiv(df[sticker + '_close'].tolist(), df[sticker + '_daily_20_SMA'].tolist()) * \
getDiv(df[sticker + '_close'].tolist(), df[sticker + '_daily_50_SMA'].tolist()) * \
getDiv(df[sticker + '_close'].tolist(), df[sticker + '_daily_100_SMA'].tolist())
_hist_mom[np.isnan(_hist_mom)] = 1.0
df[_column_hist_mom] = _hist_mom
hist_mom = {}
hist_mom['min'] = df[_column_hist_mom].min()
hist_mom['max'] = df[_column_hist_mom].max()
hist_mom_dict[sticker] = hist_mom
df['CASH_daily_K'] = 50.0
df['CASH_daily_D'] = 50.0
df['CASH_daily_KD'] = 50.0
df['CASH_daily_MFI'] = 50.0
df['CASH_daily_HIST_MOM'] = 1.0
df['CASH_daily_KD_SLOPE'] = 0.0001
df['CASH_daily_MFI_SLOPE'] = 0.0001
#From high to low, slope from high to low
kd_rank = getRanks(df, '_daily_KD_SLOPE')
mfi_rank = getRanks(df, '_daily_MFI_SLOPE')
df[RANK_FEATURES['_KD_RANK']] = kd_rank
df[RANK_FEATURES['_MFI_RANK']] = mfi_rank
return df, hist_mom_dict
def addFundPerf(df, data_frame):
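    # Initialise the fund-level performance columns with a small placeholder
    # value and drop any rows containing inf/NaN before returning.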
FEATURES = config.FUND_FEATURES
if data_frame == "weekly":
FEATURES = config.WEEKLY_FUND_FEATURES
df[FEATURES['FUND_PERF']] = 0.0000001
df[FEATURES['FUND_MONEY_FLOW']] = 0.0000001
df[FEATURES['FUND_PRICE_FLOW']] = 0.0000001
df[FEATURES['FUND_HIST']] = 0.0000001
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.dropna(how='any', inplace=True)
if df.isnull().any().any():
logger.error("After calculation feature values, df has null")
exit(1)
df = df.reset_index(drop=True)
return df
def readStatics():
d = None
w = None
with open(config.DAILY_STATICS) as json_data:
d = json.load(json_data)
with open(config.WEEKLY_STATICS) as json_data:
w = json.load(json_data)
return d, w
def dropOffTrade(d, first_date):
index = 0
for index, row in d.iterrows():
if row['date'] == first_date:
break
d = d.drop(d.index[:index])
return d
def dropOff(df, data_frame):
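    # Drop the per-sticker raw columns listed in the config for this time frame.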
drop_off_column_names = []
drops = config.DROP_OFF_COLUMN_NAMES
if data_frame == 'weekly':
drops = config.WEEKLY_DROP_OFF_COLUMN_NAMES
if data_frame == 'monthly':
drops = config.MONTHLY_DROP_OFF_COLUMN_NAMES
for sticker in STICKERS:
for drop in drops:
drop_off_column_names.append(sticker + drop)
df.drop(drop_off_column_names, axis=1, inplace=True)
return df | cc0-1.0 |
nesterione/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/tseries/tests/test_tslib.py | 7 | 66821 | import nose
from distutils.version import LooseVersion
import numpy as np
from pandas import tslib, lib
import pandas._period as period
import datetime
import pandas as pd
from pandas.core.api import (Timestamp, Index, Series, Timedelta, Period,
to_datetime)
from pandas.tslib import get_timezone
from pandas._period import period_asfreq, period_ordinal
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.frequencies import (
get_freq,
US_RESO, MS_RESO, S_RESO, H_RESO, D_RESO, T_RESO
)
import pandas.tseries.tools as tools
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
import pandas.compat as compat
from pandas.compat.numpy import (np_datetime64_compat,
np_array_datetime64_compat)
from pandas.util.testing import assert_series_equal, _skip_if_has_locale
class TestTsUtil(tm.TestCase):
def test_try_parse_dates(self):
from dateutil.parser import parse
arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)
result = lib.try_parse_dates(arr, dayfirst=True)
expected = [parse(d, dayfirst=True) for d in arr]
self.assertTrue(np.array_equal(result, expected))
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_to_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
self.assertEqual(
Timestamp(Timestamp.max.to_pydatetime()).value / 1000,
Timestamp.max.value / 1000)
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning, check_stacklevel=False):
self.assertEqual(
Timestamp(Timestamp.min.to_pydatetime()).value / 1000,
Timestamp.min.value / 1000)
class TestTimestamp(tm.TestCase):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000,
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime.datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime.datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# re-creation shouldn't affect to internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime.datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
self.assertEqual(calendar.timegm(base_dt.timetuple()) * 1000000000,
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
import pytz
import dateutil
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# re-creation shouldn't affect to internal value
result = Timestamp(result)
self.assertEqual(result.value, expected)
self.assertEqual(tslib.pydt_to_i8(result), expected)
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should preserve tz
result = Timestamp(result)
self.assertEqual(result.value, expected_tz)
self.assertEqual(tslib.pydt_to_i8(result), expected_tz)
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
self.assertEqual(result.value, expected_utc)
self.assertEqual(tslib.pydt_to_i8(result), expected_utc)
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
self.assertEqual(result.value, Timestamp('2013-11-01 05:00').value)
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
self.assertEqual(result.value, Timestamp("2015-11-18 10:00").value)
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
self.assertEqual(result.value, Timestamp("2015-11-18 10:00").value)
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
self.assertEqual(repr(result), expected)
self.assertEqual(result, eval(repr(result)))
def test_constructor_invalid(self):
with tm.assertRaisesRegexp(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assertRaisesRegexp(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_positional(self):
# GH 10758
with tm.assertRaises(TypeError):
Timestamp(2000, 1)
with tm.assertRaises(ValueError):
Timestamp(2000, 0, 1)
with tm.assertRaises(ValueError):
Timestamp(2000, 13, 1)
with tm.assertRaises(ValueError):
Timestamp(2000, 1, 0)
with tm.assertRaises(ValueError):
Timestamp(2000, 1, 32)
# GH 11630
self.assertEqual(
repr(Timestamp(2015, 11, 12)),
repr(Timestamp('20151112')))
self.assertEqual(
repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)),
repr(Timestamp('2015-11-12 01:02:03.999999')))
self.assertIs(Timestamp(None), pd.NaT)
def test_constructor_keyword(self):
# GH 10758
with tm.assertRaises(TypeError):
Timestamp(year=2000, month=1)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=0, day=1)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=13, day=1)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=1, day=0)
with tm.assertRaises(ValueError):
Timestamp(year=2000, month=1, day=32)
self.assertEqual(
repr(Timestamp(year=2015, month=11, day=12)),
repr(Timestamp('20151112')))
self.assertEqual(
repr(Timestamp(year=2015, month=11, day=12,
hour=1, minute=2, second=3, microsecond=999999)),
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime.datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
self.assertEqual(base, ts)
self.assertEqual(ts.freq, 'D')
self.assertEqual(base.toordinal(), ts.toordinal())
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
self.assertEqual(pd.Timestamp('2000-01-01', tz='US/Eastern'), ts)
self.assertEqual(base.toordinal(), ts.toordinal())
def test_constructor_offset_depr(self):
# GH 12160
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = Timestamp('2011-01-01', offset='D')
self.assertEqual(ts.freq, 'D')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.assertEqual(ts.offset, 'D')
msg = "Can only specify freq or offset, not both"
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01', offset='D', freq='D')
def test_constructor_offset_depr_fromordinal(self):
# GH 12160
base = datetime.datetime(2000, 1, 1)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
ts = Timestamp.fromordinal(base.toordinal(), offset='D')
self.assertEqual(pd.Timestamp('2000-01-01'), ts)
self.assertEqual(ts.freq, 'D')
self.assertEqual(base.toordinal(), ts.toordinal())
msg = "Can only specify freq or offset, not both"
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp.fromordinal(base.toordinal(), offset='D', freq='D')
def test_conversion(self):
# GH 9255
ts = Timestamp('2000-01-01')
result = ts.to_pydatetime()
expected = datetime.datetime(2000, 1, 1)
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
result = ts.to_datetime64()
expected = np.datetime64(ts.value, 'ns')
self.assertEqual(result, expected)
self.assertEqual(type(result), type(expected))
self.assertEqual(result.dtype, expected.dtype)
def test_repr(self):
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
dates = ['2014-03-07', '2014-01-01 09:00',
'2014-01-01 00:00:00.000000001']
# dateutil zone change (only matters for repr)
import dateutil
if (dateutil.__version__ >= LooseVersion('2.3') and
(dateutil.__version__ <= LooseVersion('2.4.0') or
dateutil.__version__ >= LooseVersion('2.6.0'))):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Pacific']
else:
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/America/Los_Angeles']
freqs = ['D', 'M', 'S', 'N']
for date in dates:
for tz in timezones:
for freq in freqs:
# avoid to match with timezone name
freq_repr = "'{0}'".format(freq)
if tz.startswith('dateutil'):
tz_repr = tz.replace('dateutil', '')
else:
tz_repr = tz
date_only = Timestamp(date)
self.assertIn(date, repr(date_only))
self.assertNotIn(tz_repr, repr(date_only))
self.assertNotIn(freq_repr, repr(date_only))
self.assertEqual(date_only, eval(repr(date_only)))
date_tz = Timestamp(date, tz=tz)
self.assertIn(date, repr(date_tz))
self.assertIn(tz_repr, repr(date_tz))
self.assertNotIn(freq_repr, repr(date_tz))
self.assertEqual(date_tz, eval(repr(date_tz)))
date_freq = Timestamp(date, freq=freq)
self.assertIn(date, repr(date_freq))
self.assertNotIn(tz_repr, repr(date_freq))
self.assertIn(freq_repr, repr(date_freq))
self.assertEqual(date_freq, eval(repr(date_freq)))
date_tz_freq = Timestamp(date, tz=tz, freq=freq)
self.assertIn(date, repr(date_tz_freq))
self.assertIn(tz_repr, repr(date_tz_freq))
self.assertIn(freq_repr, repr(date_tz_freq))
self.assertEqual(date_tz_freq, eval(repr(date_tz_freq)))
# this can cause the tz field to be populated, but it's redundant to
# information in the datestring
tm._skip_if_no_pytz()
import pytz # noqa
date_with_utc_offset = Timestamp('2014-03-13 00:00:00-0400', tz=None)
self.assertIn('2014-03-13 00:00:00-0400', repr(date_with_utc_offset))
self.assertNotIn('tzoffset', repr(date_with_utc_offset))
self.assertIn('pytz.FixedOffset(-240)', repr(date_with_utc_offset))
expr = repr(date_with_utc_offset).replace("'pytz.FixedOffset(-240)'",
'pytz.FixedOffset(-240)')
self.assertEqual(date_with_utc_offset, eval(expr))
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12', )
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
self.assertRaises(ValueError, Timestamp, np.datetime64(
date_string, dtype='M8[%s]' % unit))
in_bounds_dates = ('1677-09-23', '2262-04-11', )
for date_string in in_bounds_dates:
for unit in time_units:
Timestamp(np.datetime64(date_string, dtype='M8[%s]' % unit))
def test_tz(self):
t = '2014-02-01 09:00'
ts = Timestamp(t)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local, Timestamp(t, tz='Asia/Tokyo'))
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv, Timestamp('2014-01-31 19:00', tz='US/Eastern'))
self.assertEqual(conv.hour, 19)
# preserves nanosecond
ts = Timestamp(t) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
self.assertEqual(local.hour, 9)
self.assertEqual(local.nanosecond, 5)
conv = local.tz_convert('US/Eastern')
self.assertEqual(conv.nanosecond, 5)
self.assertEqual(conv.hour, 19)
def test_tz_localize_ambiguous(self):
ts = Timestamp('2014-11-02 01:00')
ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern')
self.assertEqual(rng[1], ts_dst)
self.assertEqual(rng[2], ts_no_dst)
self.assertRaises(ValueError, ts.tz_localize, 'US/Eastern',
ambiguous='infer')
# GH 8025
with tm.assertRaisesRegexp(TypeError,
'Cannot localize tz-aware Timestamp, use '
'tz_convert for conversions'):
Timestamp('2011-01-01', tz='US/Eastern').tz_localize('Asia/Tokyo')
with tm.assertRaisesRegexp(TypeError,
'Cannot convert tz-naive Timestamp, use '
'tz_localize to localize'):
Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
def test_tz_localize_nonexistent(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
times = ['2015-03-08 02:00', '2015-03-08 02:30',
'2015-03-29 02:00', '2015-03-29 02:30']
timezones = ['US/Eastern', 'US/Pacific',
'Europe/Paris', 'Europe/Belgrade']
for t, tz in zip(times, timezones):
ts = Timestamp(t)
self.assertRaises(NonExistentTimeError, ts.tz_localize,
tz)
self.assertRaises(NonExistentTimeError, ts.tz_localize,
tz, errors='raise')
self.assertIs(ts.tz_localize(tz, errors='coerce'),
pd.NaT)
def test_tz_localize_errors_ambiguous(self):
# See issue 13057
from pytz.exceptions import AmbiguousTimeError
ts = pd.Timestamp('2015-11-1 01:00')
self.assertRaises(AmbiguousTimeError,
ts.tz_localize, 'US/Pacific', errors='coerce')
def test_tz_localize_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00']:
ts = Timestamp(t)
localized = ts.tz_localize(tz)
self.assertEqual(localized, Timestamp(t, tz=tz))
with tm.assertRaises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
self.assertEqual(reset, ts)
self.assertTrue(reset.tzinfo is None)
def test_tz_convert_roundtrip(self):
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']:
for t in ['2014-02-01 09:00', '2014-07-08 09:00',
'2014-11-01 17:00', '2014-11-05 00:00']:
ts = Timestamp(t, tz='UTC')
converted = ts.tz_convert(tz)
reset = converted.tz_convert(None)
self.assertEqual(reset, Timestamp(t))
self.assertTrue(reset.tzinfo is None)
self.assertEqual(reset,
converted.tz_convert('UTC').tz_localize(None))
def test_barely_oob_dts(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
self.assertRaises(ValueError, Timestamp, min_ts_us - one_us)
# One us more than the maximum is an error
self.assertRaises(ValueError, Timestamp, max_ts_us + one_us)
def test_utc_z_designator(self):
self.assertEqual(get_timezone(
Timestamp('2014-11-02 01:00Z').tzinfo), 'UTC')
def test_now(self):
# #9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
self.assertTrue(abs(ts_from_method - ts_from_string) < delta)
self.assertTrue(abs(ts_datetime - ts_from_method) < delta)
self.assertTrue(abs(ts_from_method_tz - ts_from_string_tz) < delta)
self.assertTrue(abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000, ]
for n in ns:
self.assertEqual(Timestamp(n).asm8.view('i8'),
np.datetime64(n, 'ns').view('i8'), n)
self.assertEqual(Timestamp('nat').asm8.view('i8'),
np.datetime64('nat', 'ns').view('i8'))
def test_fields(self):
def check(value, equal):
# that we are int/long like
self.assertTrue(isinstance(value, (int, compat.long)))
self.assertEqual(value, equal)
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
self.assertRaises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
        check(ts.days_in_month, 31)
def test_nat_fields(self):
# GH 10050
ts = Timestamp('NaT')
self.assertTrue(np.isnan(ts.year))
self.assertTrue(np.isnan(ts.month))
self.assertTrue(np.isnan(ts.day))
self.assertTrue(np.isnan(ts.hour))
self.assertTrue(np.isnan(ts.minute))
self.assertTrue(np.isnan(ts.second))
self.assertTrue(np.isnan(ts.microsecond))
self.assertTrue(np.isnan(ts.nanosecond))
self.assertTrue(np.isnan(ts.dayofweek))
self.assertTrue(np.isnan(ts.quarter))
self.assertTrue(np.isnan(ts.dayofyear))
self.assertTrue(np.isnan(ts.week))
self.assertTrue(np.isnan(ts.daysinmonth))
self.assertTrue(np.isnan(ts.days_in_month))
def test_pprint(self):
# GH12622
import pprint
nested_obj = {'foo': 1,
'bar': [{'w': {'a': Timestamp('2011-01-01')}}] * 10}
result = pprint.pformat(nested_obj, width=50)
expected = r"""{'bar': [{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}},
{'w': {'a': Timestamp('2011-01-01 00:00:00')}}],
'foo': 1}"""
self.assertEqual(result, expected)
def to_datetime_depr(self):
# see gh-8254
ts = Timestamp('2011-01-01')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = datetime.datetime(2011, 1, 1)
result = ts.to_datetime()
self.assertEqual(result, expected)
def to_pydatetime_nonzero_nano(self):
ts = Timestamp('2011-01-01 9:00:00.123456789')
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning,
check_stacklevel=False):
expected = datetime.datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.to_pydatetime()
self.assertEqual(result, expected)
class TestDatetimeParsingWrappers(tm.TestCase):
def test_does_not_convert_mixed_integer(self):
bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T')
for bad_date_string in bad_date_strings:
self.assertFalse(tslib._does_string_look_like_datetime(
bad_date_string))
good_date_strings = ('2012-01-01',
'01/01/2012',
'Mon Sep 16, 2013',
'01012012',
'0101',
'1-1', )
for good_date_string in good_date_strings:
self.assertTrue(tslib._does_string_look_like_datetime(
good_date_string))
def test_parsers(self):
# https://github.com/dateutil/dateutil/issues/217
import dateutil
yearfirst = dateutil.__version__ >= LooseVersion('2.5.0')
cases = {'2011-01-01': datetime.datetime(2011, 1, 1),
'2Q2005': datetime.datetime(2005, 4, 1),
'2Q05': datetime.datetime(2005, 4, 1),
'2005Q1': datetime.datetime(2005, 1, 1),
'05Q1': datetime.datetime(2005, 1, 1),
'2011Q3': datetime.datetime(2011, 7, 1),
'11Q3': datetime.datetime(2011, 7, 1),
'3Q2011': datetime.datetime(2011, 7, 1),
'3Q11': datetime.datetime(2011, 7, 1),
# quarterly without space
'2000Q4': datetime.datetime(2000, 10, 1),
'00Q4': datetime.datetime(2000, 10, 1),
'4Q2000': datetime.datetime(2000, 10, 1),
'4Q00': datetime.datetime(2000, 10, 1),
'2000q4': datetime.datetime(2000, 10, 1),
'2000-Q4': datetime.datetime(2000, 10, 1),
'00-Q4': datetime.datetime(2000, 10, 1),
'4Q-2000': datetime.datetime(2000, 10, 1),
'4Q-00': datetime.datetime(2000, 10, 1),
'00q4': datetime.datetime(2000, 10, 1),
'2005': datetime.datetime(2005, 1, 1),
'2005-11': datetime.datetime(2005, 11, 1),
'2005 11': datetime.datetime(2005, 11, 1),
'11-2005': datetime.datetime(2005, 11, 1),
'11 2005': datetime.datetime(2005, 11, 1),
'200511': datetime.datetime(2020, 5, 11),
'20051109': datetime.datetime(2005, 11, 9),
'20051109 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'20051109 08H': datetime.datetime(2005, 11, 9, 8, 0),
'2005-11-09 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'2005-11-09 08H': datetime.datetime(2005, 11, 9, 8, 0),
'2005/11/09 10:15': datetime.datetime(2005, 11, 9, 10, 15),
'2005/11/09 08H': datetime.datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime.datetime(2003, 9, 25, 10,
36, 28),
"Thu Sep 25 2003": datetime.datetime(2003, 9, 25),
"Sep 25 2003": datetime.datetime(2003, 9, 25),
"January 1 2014": datetime.datetime(2014, 1, 1),
# GH 10537
'2014-06': datetime.datetime(2014, 6, 1),
'06-2014': datetime.datetime(2014, 6, 1),
'2014-6': datetime.datetime(2014, 6, 1),
'6-2014': datetime.datetime(2014, 6, 1),
'20010101 12': datetime.datetime(2001, 1, 1, 12),
'20010101 1234': datetime.datetime(2001, 1, 1, 12, 34),
'20010101 123456': datetime.datetime(2001, 1, 1, 12, 34, 56),
}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str,
yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(np.array([date_str], dtype=object),
yearfirst=yearfirst)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
self.assertEqual(res, expected)
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
            # these really need to have yearfirst, but we don't support
if not yearfirst:
result5 = Timestamp(date_str)
self.assertEqual(result5, expected)
result7 = date_range(date_str, freq='S', periods=1,
yearfirst=yearfirst)
self.assertEqual(result7, expected)
# NaT
result1, _, _ = tools.parse_time_string('NaT')
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
        self.assertTrue(result1 is tslib.NaT)
        self.assertTrue(result2 is tslib.NaT)
        self.assertTrue(result3 is tslib.NaT)
        self.assertTrue(result4 is tslib.NaT)
def test_parsers_quarter_invalid(self):
cases = ['2Q 2005', '2Q-200A', '2Q-200', '22Q2005', '6Q-20', '2Q200.']
for case in cases:
self.assertRaises(ValueError, tools.parse_time_string, case)
def test_parsers_dayfirst_yearfirst(self):
tm._skip_if_no_dateutil()
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
import dateutil
is_lt_253 = dateutil.__version__ < LooseVersion('2.5.3')
# str : dayfirst, yearfirst, expected
cases = {'10-11-12': [(False, False,
datetime.datetime(2012, 10, 11)),
(True, False,
datetime.datetime(2012, 11, 10)),
(False, True,
datetime.datetime(2010, 11, 12)),
(True, True,
datetime.datetime(2010, 12, 11))],
'20/12/21': [(False, False,
datetime.datetime(2021, 12, 20)),
(True, False,
datetime.datetime(2021, 12, 20)),
(False, True,
datetime.datetime(2020, 12, 21)),
(True, True,
datetime.datetime(2020, 12, 21))]}
from dateutil.parser import parse
for date_str, values in compat.iteritems(cases):
for dayfirst, yearfirst, expected in values:
                # odd comparisons across dateutil versions
# let's just skip
if dayfirst and yearfirst and is_lt_253:
continue
# compare with dateutil result
dateutil_result = parse(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
self.assertEqual(dateutil_result, expected)
result1, _, _ = tools.parse_time_string(date_str,
dayfirst=dayfirst,
yearfirst=yearfirst)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
self.assertEqual(result2, expected)
result3 = to_datetime(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
result4 = DatetimeIndex([date_str], dayfirst=dayfirst,
yearfirst=yearfirst)[0]
self.assertEqual(result1, expected)
self.assertEqual(result3, expected)
self.assertEqual(result4, expected)
def test_parsers_timestring(self):
tm._skip_if_no_dateutil()
from dateutil.parser import parse
# must be the same as dateutil result
cases = {'10:15': (parse('10:15'), datetime.datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime.datetime(1, 1, 1, 9, 5))}
for date_str, (exp_now, exp_def) in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string returns a datetime based on the default
            # date (year 1); the others use the current date, and this
            # can't be changed because it is relied upon by time series
            # plotting
self.assertEqual(result1, exp_def)
self.assertEqual(result2, exp_now)
self.assertEqual(result3, exp_now)
self.assertEqual(result4, exp_now)
self.assertEqual(result5, exp_now)
def test_parsers_time(self):
# GH11818
_skip_if_has_locale()
strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500",
"2:15:00pm", "021500pm", datetime.time(14, 15)]
expected = datetime.time(14, 15)
for time_string in strings:
self.assertEqual(tools.to_time(time_string), expected)
new_string = "14.15"
self.assertRaises(ValueError, tools.to_time, new_string)
self.assertEqual(tools.to_time(new_string, format="%H.%M"), expected)
arg = ["14:15", "20:20"]
expected_arr = [datetime.time(14, 15), datetime.time(20, 20)]
self.assertEqual(tools.to_time(arg), expected_arr)
self.assertEqual(tools.to_time(arg, format="%H:%M"), expected_arr)
self.assertEqual(tools.to_time(arg, infer_time_format=True),
expected_arr)
self.assertEqual(tools.to_time(arg, format="%I:%M%p", errors="coerce"),
[None, None])
res = tools.to_time(arg, format="%I:%M%p", errors="ignore")
self.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
with tm.assertRaises(ValueError):
tools.to_time(arg, format="%I:%M%p", errors="raise")
self.assert_series_equal(tools.to_time(Series(arg, name="test")),
Series(expected_arr, name="test"))
res = tools.to_time(np.array(arg))
self.assertIsInstance(res, list)
self.assert_equal(res, expected_arr)
def test_parsers_monthfreq(self):
cases = {'201101': datetime.datetime(2011, 1, 1, 0, 0),
'200005': datetime.datetime(2000, 5, 1, 0, 0)}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str, freq='M')
self.assertEqual(result1, expected)
def test_parsers_quarterly_with_freq(self):
msg = ('Incorrect quarterly string is given, quarter '
'must be between 1 and 4: 2013Q5')
with tm.assertRaisesRegexp(tslib.DateParseError, msg):
tools.parse_time_string('2013Q5')
# GH 5418
msg = ('Unable to retrieve month information from given freq: '
'INVLD-L-DEC-SAT')
with tm.assertRaisesRegexp(tslib.DateParseError, msg):
tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT')
cases = {('2013Q2', None): datetime.datetime(2013, 4, 1),
('2013Q2', 'A-APR'): datetime.datetime(2012, 8, 1),
('2013-Q2', 'A-DEC'): datetime.datetime(2013, 4, 1)}
for (date_str, freq), exp in compat.iteritems(cases):
result, _, _ = tools.parse_time_string(date_str, freq=freq)
self.assertEqual(result, exp)
def test_parsers_timezone_minute_offsets_roundtrip(self):
# GH11708
base = to_datetime("2013-01-01 00:00:00")
dt_strings = [
('2013-01-01 05:45+0545',
"Asia/Katmandu",
"Timestamp('2013-01-01 05:45:00+0545', tz='Asia/Katmandu')"),
('2013-01-01 05:30+0530',
"Asia/Kolkata",
"Timestamp('2013-01-01 05:30:00+0530', tz='Asia/Kolkata')")
]
for dt_string, tz, dt_string_repr in dt_strings:
dt_time = to_datetime(dt_string)
self.assertEqual(base, dt_time)
converted_time = dt_time.tz_localize('UTC').tz_convert(tz)
self.assertEqual(dt_string_repr, repr(converted_time))
def test_parsers_iso8601(self):
# GH 12060
# test only the iso parser - flexibility to different
        # separators and leading 0s
# Timestamp construction falls back to dateutil
cases = {'2011-01-02': datetime.datetime(2011, 1, 2),
'2011-1-2': datetime.datetime(2011, 1, 2),
'2011-01': datetime.datetime(2011, 1, 1),
'2011-1': datetime.datetime(2011, 1, 1),
'2011 01 02': datetime.datetime(2011, 1, 2),
'2011.01.02': datetime.datetime(2011, 1, 2),
'2011/01/02': datetime.datetime(2011, 1, 2),
'2011\\01\\02': datetime.datetime(2011, 1, 2),
'2013-01-01 05:30:00': datetime.datetime(2013, 1, 1, 5, 30),
'2013-1-1 5:30:00': datetime.datetime(2013, 1, 1, 5, 30)}
for date_str, exp in compat.iteritems(cases):
actual = tslib._test_parse_iso8601(date_str)
self.assertEqual(actual, exp)
        # separators must all match - YYYYMM not valid
invalid_cases = ['2011-01/02', '2011^11^11',
'201401', '201111', '200101',
# mixed separated and unseparated
'2005-0101', '200501-01',
'20010101 12:3456', '20010101 1234:56',
# HHMMSS must have two digits in each component
# if unseparated
'20010101 1', '20010101 123', '20010101 12345',
'20010101 12345Z',
# wrong separator for HHMMSS
'2001-01-01 12-34-56']
for date_str in invalid_cases:
with tm.assertRaises(ValueError):
tslib._test_parse_iso8601(date_str)
# If no ValueError raised, let me know which case failed.
raise Exception(date_str)
class TestArrayToDatetime(tm.TestCase):
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np_array_datetime64_compat(
[
'2013-01-01T00:00:00.000000000-0000',
'2013-01-02T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np_array_datetime64_compat(
[
'2013-09-16T00:00:00.000000000-0000',
'2013-09-17T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_number_looking_strings_not_into_datetime(self):
# #4601
        # These strings don't look like datetimes, so no datetime
        # conversion should be attempted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
invalid_dates = [
datetime.date(1000, 1, 1),
datetime.datetime(1000, 1, 1),
'1000-01-01',
'Jan 1, 1000',
np.datetime64('1000-01-01'),
]
for invalid_date in invalid_dates:
self.assertRaises(ValueError,
tslib.array_to_datetime,
np.array(
[invalid_date], dtype='object'),
errors='raise', )
self.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([invalid_date], dtype='object'),
errors='coerce'),
np.array([tslib.iNaT], dtype='M8[ns]')
)
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np_array_datetime64_compat(
[
tslib.iNaT,
'2000-01-01T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_coerce_of_invalid_datetimes(self):
arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
        # With coercing, the invalid dates become iNaT
self.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np_array_datetime64_compat(
[
'2013-01-01T00:00:00.000000000-0000',
tslib.iNaT,
tslib.iNaT
],
dtype='M8[ns]'
)
)
def test_parsing_timezone_offsets(self):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added
dt_strings = [
'01-01-2013 08:00:00+08:00',
'2013-01-01T08:00:00.000000000+0800',
'2012-12-31T16:00:00.000000000-0800',
'12-31-2012 23:00:00-01:00'
]
expected_output = tslib.array_to_datetime(np.array(
['01-01-2013 00:00:00'], dtype=object))
for dt_string in dt_strings:
self.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([dt_string], dtype=object)
),
expected_output
)
class TestTimestampNsOperations(tm.TestCase):
def setUp(self):
self.timestamp = Timestamp(datetime.datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
self.assertEqual(modified_value - value, expected_value)
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
self.assertEqual(ts.value, expected_value - 9 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
self.assertEqual(ts.value, expected_value + 4 * 3600 * 1000000000)
self.assertIn(expected_repr, repr(ts))
# GH 10041
ts = Timestamp('20130501T071545.123456789')
self.assertEqual(ts.value, expected_value)
self.assertIn(expected_repr, repr(ts))
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000005Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000005')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 5)
expected = 1293840000000000010
t = t + offsets.Nano(5)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(t)
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
t = Timestamp(np_datetime64_compat('2011-01-01 00:00:00.000000010Z'))
self.assertEqual(repr(t), "Timestamp('2011-01-01 00:00:00.000000010')")
self.assertEqual(t.value, expected)
self.assertEqual(t.nanosecond, 10)
def test_nat_arithmetic(self):
# GH 6873
i = 2
f = 1.5
for (left, right) in [(pd.NaT, i), (pd.NaT, f), (pd.NaT, np.nan)]:
self.assertIs(left / right, pd.NaT)
self.assertIs(left * right, pd.NaT)
self.assertIs(right * left, pd.NaT)
with tm.assertRaises(TypeError):
right / left
# Timestamp / datetime
t = Timestamp('2014-01-01')
dt = datetime.datetime(2014, 1, 1)
for (left, right) in [(pd.NaT, pd.NaT), (pd.NaT, t), (pd.NaT, dt)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(left - right, pd.NaT)
self.assertIs(right - left, pd.NaT)
# timedelta-like
# offsets are tested in test_offsets.py
delta = datetime.timedelta(3600)
td = Timedelta('5s')
for (left, right) in [(pd.NaT, delta), (pd.NaT, td)]:
# NaT + timedelta-like returns NaT
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(right - left, pd.NaT)
self.assertIs(left - right, pd.NaT)
# GH 11718
tm._skip_if_no_pytz()
import pytz
t_utc = Timestamp('2014-01-01', tz='UTC')
t_tz = Timestamp('2014-01-01', tz='US/Eastern')
dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
for (left, right) in [(pd.NaT, t_utc), (pd.NaT, t_tz),
(pd.NaT, dt_tz)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(left - right, pd.NaT)
self.assertIs(right - left, pd.NaT)
# int addition / subtraction
for (left, right) in [(pd.NaT, 2), (pd.NaT, 0), (pd.NaT, -3)]:
self.assertIs(right + left, pd.NaT)
self.assertIs(left + right, pd.NaT)
self.assertIs(left - right, pd.NaT)
self.assertIs(right - left, pd.NaT)
def test_nat_arithmetic_index(self):
# GH 11718
# datetime
tm._skip_if_no_pytz()
dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x')
self.assert_index_equal(dti + pd.NaT, exp)
self.assert_index_equal(pd.NaT + dti, exp)
dti_tz = pd.DatetimeIndex(['2011-01-01', '2011-01-02'],
tz='US/Eastern', name='x')
exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x', tz='US/Eastern')
self.assert_index_equal(dti_tz + pd.NaT, exp)
self.assert_index_equal(pd.NaT + dti_tz, exp)
exp = pd.TimedeltaIndex([pd.NaT, pd.NaT], name='x')
for (left, right) in [(pd.NaT, dti), (pd.NaT, dti_tz)]:
self.assert_index_equal(left - right, exp)
self.assert_index_equal(right - left, exp)
# timedelta
tdi = pd.TimedeltaIndex(['1 day', '2 day'], name='x')
exp = pd.DatetimeIndex([pd.NaT, pd.NaT], name='x')
for (left, right) in [(pd.NaT, tdi)]:
self.assert_index_equal(left + right, exp)
self.assert_index_equal(right + left, exp)
self.assert_index_equal(left - right, exp)
self.assert_index_equal(right - left, exp)
class TestTslib(tm.TestCase):
def test_intraday_conversion_factors(self):
self.assertEqual(period_asfreq(
1, get_freq('D'), get_freq('H'), False), 24)
self.assertEqual(period_asfreq(
1, get_freq('D'), get_freq('T'), False), 1440)
self.assertEqual(period_asfreq(
1, get_freq('D'), get_freq('S'), False), 86400)
self.assertEqual(period_asfreq(1, get_freq(
'D'), get_freq('L'), False), 86400000)
self.assertEqual(period_asfreq(1, get_freq(
'D'), get_freq('U'), False), 86400000000)
self.assertEqual(period_asfreq(1, get_freq(
'D'), get_freq('N'), False), 86400000000000)
self.assertEqual(period_asfreq(
1, get_freq('H'), get_freq('T'), False), 60)
self.assertEqual(period_asfreq(
1, get_freq('H'), get_freq('S'), False), 3600)
self.assertEqual(period_asfreq(1, get_freq('H'),
get_freq('L'), False), 3600000)
self.assertEqual(period_asfreq(1, get_freq(
'H'), get_freq('U'), False), 3600000000)
self.assertEqual(period_asfreq(1, get_freq(
'H'), get_freq('N'), False), 3600000000000)
self.assertEqual(period_asfreq(
1, get_freq('T'), get_freq('S'), False), 60)
self.assertEqual(period_asfreq(
1, get_freq('T'), get_freq('L'), False), 60000)
self.assertEqual(period_asfreq(1, get_freq(
'T'), get_freq('U'), False), 60000000)
self.assertEqual(period_asfreq(1, get_freq(
'T'), get_freq('N'), False), 60000000000)
self.assertEqual(period_asfreq(
1, get_freq('S'), get_freq('L'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('S'),
get_freq('U'), False), 1000000)
self.assertEqual(period_asfreq(1, get_freq(
'S'), get_freq('N'), False), 1000000000)
self.assertEqual(period_asfreq(
1, get_freq('L'), get_freq('U'), False), 1000)
self.assertEqual(period_asfreq(1, get_freq('L'),
get_freq('N'), False), 1000000)
self.assertEqual(period_asfreq(
1, get_freq('U'), get_freq('N'), False), 1000)
def test_period_ordinal_start_values(self):
# information for 1.1.1970
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('A')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('M')))
self.assertEqual(1, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('D')))
self.assertEqual(0, period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0,
get_freq('B')))
def test_period_ordinal_week(self):
self.assertEqual(1, period_ordinal(1970, 1, 4, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(2, period_ordinal(1970, 1, 5, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(2284, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0,
get_freq('W')))
self.assertEqual(2285, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0,
get_freq('W')))
def test_period_ordinal_business_day(self):
# Thursday
self.assertEqual(11415, period_ordinal(2013, 10, 3, 0, 0, 0, 0, 0,
get_freq('B')))
# Friday
self.assertEqual(11416, period_ordinal(2013, 10, 4, 0, 0, 0, 0, 0,
get_freq('B')))
# Saturday
self.assertEqual(11417, period_ordinal(2013, 10, 5, 0, 0, 0, 0, 0,
get_freq('B')))
# Sunday
self.assertEqual(11417, period_ordinal(2013, 10, 6, 0, 0, 0, 0, 0,
get_freq('B')))
# Monday
self.assertEqual(11417, period_ordinal(2013, 10, 7, 0, 0, 0, 0, 0,
get_freq('B')))
# Tuesday
self.assertEqual(11418, period_ordinal(2013, 10, 8, 0, 0, 0, 0, 0,
get_freq('B')))
def test_tslib_tz_convert(self):
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
self.assert_numpy_array_equal(result, result_single)
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
            # local tz to UTC can differ at hourly (or higher) freqs
            # because of DST
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)
utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
result = tslib.tz_convert(np.array([], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
# Check all-NaT array
result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
tslib.maybe_get_tz('US/Eastern'),
tslib.maybe_get_tz('Asia/Tokyo'))
self.assert_numpy_array_equal(result, np.array(
[tslib.iNaT], dtype=np.int64))
def test_shift_months(self):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'), Timestamp(
'2000-01-31 00:23:00'), Timestamp('2000-01-01'), Timestamp(
'2000-02-29'), Timestamp('2000-12-31')])
for years in [-1, 0, 1]:
for months in [-2, 0, 2]:
actual = DatetimeIndex(tslib.shift_months(s.asi8, years * 12 +
months))
expected = DatetimeIndex([x + offsets.DateOffset(
years=years, months=months) for x in s])
tm.assert_index_equal(actual, expected)
def test_round(self):
stamp = Timestamp('2000-01-05 05:09:15.13')
def _check_round(freq, expected):
result = stamp.round(freq=freq)
self.assertEqual(result, expected)
for freq, expected in [('D', Timestamp('2000-01-05 00:00:00')),
('H', Timestamp('2000-01-05 05:00:00')),
('S', Timestamp('2000-01-05 05:09:15'))]:
_check_round(freq, expected)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
stamp.round('foo')
class TestTimestampOps(tm.TestCase):
def test_timestamp_and_datetime(self):
self.assertEqual((Timestamp(datetime.datetime(
2013, 10, 13)) - datetime.datetime(2013, 10, 12)).days, 1)
self.assertEqual((datetime.datetime(2013, 10, 12) -
Timestamp(datetime.datetime(2013, 10, 13))).days, -1)
def test_timestamp_and_series(self):
timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D',
tz='US/Eastern'))
first_timestamp = timestamp_series[0]
delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')])
assert_series_equal(timestamp_series - first_timestamp, delta_series)
assert_series_equal(first_timestamp - timestamp_series, -delta_series)
def test_addition_subtraction_types(self):
# Assert on the types resulting from Timestamp +/- various date/time
# objects
datetime_instance = datetime.datetime(2014, 3, 4)
timedelta_instance = datetime.timedelta(seconds=1)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1,
freq='D')[0]
self.assertEqual(type(timestamp_instance + 1), Timestamp)
self.assertEqual(type(timestamp_instance - 1), Timestamp)
# Timestamp + datetime not supported, though subtraction is supported
# and yields timedelta more tests in tseries/base/tests/test_base.py
self.assertEqual(
type(timestamp_instance - datetime_instance), Timedelta)
self.assertEqual(
type(timestamp_instance + timedelta_instance), Timestamp)
self.assertEqual(
type(timestamp_instance - timedelta_instance), Timestamp)
# Timestamp +/- datetime64 not supported, so not tested (could possibly
# assert error raised?)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual(
type(timestamp_instance + timedelta64_instance), Timestamp)
self.assertEqual(
type(timestamp_instance - timedelta64_instance), Timestamp)
def test_addition_subtraction_preserve_frequency(self):
timestamp_instance = date_range('2014-03-05', periods=1, freq='D')[0]
timedelta_instance = datetime.timedelta(days=1)
original_freq = timestamp_instance.freq
self.assertEqual((timestamp_instance + 1).freq, original_freq)
self.assertEqual((timestamp_instance - 1).freq, original_freq)
self.assertEqual(
(timestamp_instance + timedelta_instance).freq, original_freq)
self.assertEqual(
(timestamp_instance - timedelta_instance).freq, original_freq)
timedelta64_instance = np.timedelta64(1, 'D')
self.assertEqual(
(timestamp_instance + timedelta64_instance).freq, original_freq)
self.assertEqual(
(timestamp_instance - timedelta64_instance).freq, original_freq)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
[D_RESO, D_RESO,
D_RESO, D_RESO,
H_RESO, T_RESO,
S_RESO, MS_RESO,
US_RESO]):
for tz in [None, 'Asia/Tokyo', 'US/Eastern',
'dateutil/US/Eastern']:
idx = date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
result = period.resolution(idx.asi8, idx.tz)
self.assertEqual(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
rrohan/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
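# A small illustrative helper (added sketch, not part of the original test
# suite): it demonstrates the chance-adjustment property exercised above by
# contrasting an adjusted score with raw mutual information on random
# labelings. All names used here come from this file's imports; the function
# name and parameters are illustrative only.
def _demo_chance_adjustment(n_samples=100, n_clusters=10, seed=0):
    rng = np.random.RandomState(seed)
    labels_a = rng.randint(0, n_clusters, n_samples)
    labels_b = rng.randint(0, n_clusters, n_samples)
    # the adjusted score stays close to zero; raw MI does not
    return (adjusted_rand_score(labels_a, labels_b),
            mutual_info_score(labels_a, labels_b))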
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
            # We pass '0' for angle here, since it will be rotated (in raster
            # space) in the following call to draw_text_image.
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
        # passing rgb is a little hack to make caching in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key)
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = npy.array(Z * 255.0, npy.uint8)
self._renderer.draw_text_image(Z, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
        Get the font for text instance t, caching for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| agpl-3.0 |
pySTEPS/pysteps | examples/plot_cascade_decomposition.py | 1 | 4579 | #!/bin/env python
"""
Cascade decomposition
=====================
This example script shows how to compute and plot the cascade decomposition of
a single radar precipitation field in pysteps.
"""
from matplotlib import cm, pyplot as plt
import numpy as np
import os
from pprint import pprint
from pysteps.cascade.bandpass_filters import filter_gaussian
from pysteps import io, rcparams
from pysteps.cascade.decomposition import decomposition_fft
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read precipitation field
# ------------------------
#
# First, the radar composite is imported and transformed into units
# of dB.
# Import the example radar composite
root_path = rcparams.data_sources["fmi"]["root_path"]
filename = os.path.join(
root_path, "20160928", "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz"
)
R, _, metadata = io.import_fmi_pgm(filename, gzipped=True)
# Convert to rain rate
R, metadata = conversion.to_rainrate(R, metadata)
# Nicely print the metadata
pprint(metadata)
# Plot the rainfall field
plot_precip_field(R, geodata=metadata)
plt.show()
# Log-transform the data
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
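# Added note (not in the original example): the dB transform above can be
# inverted later if rain-rate units are needed again; in pysteps this is
# done with the same function, e.g. (keyword name assumed from the
# transformation module's API):
# R_rr, metadata_rr = transformation.dB_transform(R, metadata, inverse=True)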
###############################################################################
# 2D Fourier spectrum
# --------------------
#
# Compute and plot the 2D Fourier power spectrum of the precipitation field.
# Replace non-finite values (NaNs) with the fill value
R[~np.isfinite(R)] = metadata["zerovalue"]
# Compute the Fourier transform of the input field
F = abs(np.fft.fftshift(np.fft.fft2(R)))
# Plot the power spectrum
M, N = F.shape
fig, ax = plt.subplots()
im = ax.imshow(
np.log(F ** 2), vmin=4, vmax=24, cmap=cm.jet, extent=(-N / 2, N / 2, -M / 2, M / 2)
)
cb = fig.colorbar(im)
ax.set_xlabel("Wavenumber $k_x$")
ax.set_ylabel("Wavenumber $k_y$")
ax.set_title("Log-power spectrum of R")
plt.show()
###############################################################################
# Cascade decomposition
# ---------------------
#
# First, construct a set of Gaussian bandpass filters and plot the corresponding
# 1D filters.
num_cascade_levels = 7
# Construct the Gaussian bandpass filters
filter = filter_gaussian(R.shape, num_cascade_levels)
# Plot the bandpass filter weights
L = max(N, M)
fig, ax = plt.subplots()
for k in range(num_cascade_levels):
ax.semilogx(
np.linspace(0, L / 2, len(filter["weights_1d"][k, :])),
filter["weights_1d"][k, :],
"k-",
basex=pow(0.5 * L / 3, 1.0 / (num_cascade_levels - 2)),
)
ax.set_xlim(1, L / 2)
ax.set_ylim(0, 1)
xt = np.hstack([[1.0], filter["central_wavenumbers"][1:]])
ax.set_xticks(xt)
ax.set_xticklabels(["%.2f" % cf for cf in filter["central_wavenumbers"]])
ax.set_xlabel("Radial wavenumber $|\mathbf{k}|$")
ax.set_ylabel("Normalized weight")
ax.set_title("Bandpass filter weights")
plt.show()
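# Added sanity check (not part of the original example): the Gaussian
# bandpass weights are normalized, so at every radial wavenumber the
# contributions of all cascade levels should sum to approximately one.
# Only the "weights_1d" field already used above is accessed here.
weights_sum = filter["weights_1d"].sum(axis=0)
print("1D weights summed over levels: min=%.3f, max=%.3f"
      % (weights_sum.min(), weights_sum.max()))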
###############################################################################
# Finally, apply the 2D Gaussian filters to decompose the radar rainfall field
# into a set of cascade levels of decreasing spatial scale and plot them.
decomp = decomposition_fft(R, filter, compute_stats=True)
# Plot the normalized cascade levels
for i in range(num_cascade_levels):
mu = decomp["means"][i]
sigma = decomp["stds"][i]
decomp["cascade_levels"][i] = (decomp["cascade_levels"][i] - mu) / sigma
fig, ax = plt.subplots(nrows=2, ncols=4)
ax[0, 0].imshow(R, cmap=cm.RdBu_r, vmin=-5, vmax=5)
ax[0, 1].imshow(decomp["cascade_levels"][0], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[0, 2].imshow(decomp["cascade_levels"][1], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[0, 3].imshow(decomp["cascade_levels"][2], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 0].imshow(decomp["cascade_levels"][3], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 1].imshow(decomp["cascade_levels"][4], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 2].imshow(decomp["cascade_levels"][5], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[1, 3].imshow(decomp["cascade_levels"][6], cmap=cm.RdBu_r, vmin=-3, vmax=3)
ax[0, 0].set_title("Observed")
ax[0, 1].set_title("Level 1")
ax[0, 2].set_title("Level 2")
ax[0, 3].set_title("Level 3")
ax[1, 0].set_title("Level 4")
ax[1, 1].set_title("Level 5")
ax[1, 2].set_title("Level 6")
ax[1, 3].set_title("Level 7")
for i in range(2):
for j in range(4):
ax[i, j].set_xticks([])
ax[i, j].set_yticks([])
plt.tight_layout()
plt.show()
# sphinx_gallery_thumbnail_number = 4
| bsd-3-clause |
AFMD/smallProjects | nanowire-network-simulations/manningp3plot.py | 1 | 17973 | """
Created on Mon Jun 15 15:42:23 2020
@author: sturdzal
"""
#@title Imports
from shapely.geometry import LineString, MultiLineString, MultiPoint, Point
from shapely.ops import cascaded_union
from scipy.special import comb
from itertools import product
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
import numpy as np
from itertools import islice
from cvxopt import matrix, solvers
from cvxopt.base import sparse
from cvxopt.base import matrix as m
from cvxopt.lapack import *
from cvxopt.blas import *
import cvxopt.misc as misc
#from pykrylov.symmlq import symmlq
#from pykrylov.symmlq import *
#from symmlq import *
#import symmlq
import networkx as nx
from itertools import islice, combinations
from collections import Counter, defaultdict
#from pykrylov.linop import PysparseLinearOperator
#from pykrylov.symmlq import *
import scipy
from scipy.sparse.linalg import *
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import minres
import os.path
import time
import os
import matplotlib.pyplot as plt
import random
from statistics import mean
#------------------Parameter--------------------
R_junc = 1.0 # 100100
#R_junc_list = [1000, 10000, 100000, 10000000, 10000000]
rho0 = 0.314 #0.0790 #0.8 #0.0790 #0.0226
#rho0_list = [0.000314, 0.00314, 0.0314, 0.314, 3.14, 31.4, 314]
wire_diameter = 2 #30.0
wire_length= 1.0 #6.0
extinction_coeff = 4 #0.2
box_length = 5 #15.0 5x wire length gives good results independent of tol for e-9 to e-15
samples = 1
elec_length = box_length
box_y = box_length
lead_sep = box_length
n_min = 0.16411
nstep = 10*n_min
n_initial = 40*n_min #1.90079+30*nstep #0.16411
n_final = 80*n_min #1.90079+31*nstep
percentage_chance = 0.0
distl = False
lower_l = 2.2
upper_l = np.inf
sigmal = 2.0
lmean = wire_length
A0 = math.pi*((wire_diameter*0.001)/2)**2
# End ---------- Parameters block -------------
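# ---------- Added note: optical model sketch -------------
# A minimal sketch (added for clarity, not part of the original workflow) of
# the fill-factor/transmittance relation applied further below when results
# are written out: the area fraction covered by wires of number density n
# (um^-2), diameter d (nm) and length l (um) is AF = n*d*l*0.001, and the
# transmittance follows a Beer-Lambert-type law T = exp(-k*AF). The helper
# name is illustrative only.
def estimate_transmittance(n, d_nm, l_um, k=extinction_coeff):
    af = n * d_nm * l_um * 0.001
    return math.exp(-k * af)
# e.g. estimate_transmittance(n_initial, wire_diameter, wire_length)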
# ---------- Parameters for symmlq routine -------------
tol=1e-10
show=False
maxit=None
#----------- Parameters for Calculation time display --------
start_time = time.process_time()
# ---------- Output file -------------
res_file = "output2.txt"
if os.path.exists(res_file)==False:
open(res_file, "w").write("Density AF Transmittance Average_resistance resStdev Junct_density R_junc rho0 wire_diameter wire_length box_length samples nstep n_initial n_final tolerance_minres distl lower_l upper_l sigmal junctions_removal calctime\n")
#res_dist = open(res_file,"a")
# ---------- Auxiliary lists for ensemble calculation -------------
res_list=[]
short_sep_list=[]
junc_dens=[]
dens_temp=[]
avg_res_temp=[]
st_dev_temp=[]
resistancelist=[]
transmittancelist=[]
for wire_diameter in np.arange(0.5, 0.6, 0.1):
for density in np.arange(n_initial,n_final,nstep):
for sample in range(samples):
while True:
try:
area = box_length**2 # box area (in um^2)
box_x = box_length # box width (in um)
box_y = box_length # box length (in um)
num_junc = 0 # junction counter
nwires = area*density # total number of nanowires
# Start ---------- Creation of random stick coordinates and electrodes -------------
# a single wire is represented by a set of initial and final coordinates as [(x1,y1),(x2,y2)].
x1 = np.random.rand(int(nwires))*box_x
y1 = np.random.rand(int(nwires))*box_y
length_array = np.zeros(int(nwires))
if distl == True:
lengths = stats.truncnorm((lower_l - lmean) / sigmal, (upper_l - lmean) / sigmal, loc=lmean, scale=sigmal)
length_array = lengths.rvs(size=nwires)
else:
length_array.fill(wire_length)
                    # Sampling the angles that define the wire orientation (in radians, from 0 to 2*pi).
theta1 = np.random.rand(int(nwires))*2.0*math.pi
x2 = length_array * np.cos(theta1) + x1
y2 = length_array * np.sin(theta1) + y1
# Adding to the coordinate list (x1,y1) the points corresponding to the contact leads.
x1 = np.insert(x1, 0, 0.0)
x1 = np.insert(x1, 0,0)
# Adding to the coordinate list (x2,y2) the points corresponding to the contact leads.
x2 = np.insert(x2, 0, 0.0)
x2 = np.insert(x2, 0,0)
ypostop = box_y/2 + elec_length/2
yposbot = box_y/2 - elec_length/2
y1 = np.insert(y1, 0,ypostop)
y1 = np.insert(y1, 0,ypostop)
y2 = np.insert(y2, 0,yposbot)
y2 = np.insert(y2, 0, yposbot)
xposleft = box_x/2-lead_sep/2
xposright = box_x/2+lead_sep/2
x1[0]= xposleft
x2[0] = xposleft
x1[1] = xposright
x2[1] = xposright
# Merging [(x1,y1),(x2,y2)] in accordance to shapely format.
# coords1 = zip(x1,y1)
# coords2 = zip(x2,y2)
# coords = zip(coords1,coords2)
coords1 = list(zip(x1,y1))
coords2 = list(zip(x2,y2))
coords = list(zip(coords1,coords2))
mlines = MultiLineString(coords)
nwires_plus_leads = int(nwires+2)
# End ---------- Creation of random stick coordinates and electrodes -------------
# Start ---------- Identifying intersections between wires -------------
# all pair wire combination
lines_comb = combinations(mlines, 2)
# list storing True or False for pair intersection
intersection_check = [pair[0].intersects(pair[1]) for pair in lines_comb]
# list storing the indexes of intersection_check where the intersection between two wires is TRUE
intersections = [i for i, x in enumerate(intersection_check) if x and random.random() > percentage_chance]
# full list containing all non-repeated combinations of wires
combination_index = list((i,j) for ((i,_),(j,_)) in combinations(enumerate(mlines), 2))
# list storing the connection (wire_i, wire_j)
intersection_index = [combination_index[intersections[i]] for i in range(len(intersections))]
# checking the coordinates for interesection points
inter_point_coll = [pair[0].intersection(pair[1]) for pair in combinations(mlines, 2)]
# eliminating empty shapely points from the previous list
no_empty_inter_point_coll = [inter_point_coll[intersections[i]] for i in range(len(intersections))]
# total number of intersections
nintersections = len(intersection_index)
# End ---------- Identifying intersections between wires -------------
# Start ---------- MNR nodal mapping -------------
# dictionary containing wire index: [list of wires connected to a given wire]
wire_touch_list = defaultdict(list)
for k, v in intersection_index:
wire_touch_list[k].append(v)
wire_touch_list[v].append(k)
# dictionary containing wire index: [label nodes following MNR mapping]
wire_touch_label_list = defaultdict(list)
each_wire_inter_point_storage = defaultdict(list)
label = 2
# Assigning new node labelling according to MNR mapping
for i in iter(wire_touch_list.items()):
for j in range(len(i[1])):
cpoint = mlines[i[0]].intersection(mlines[i[1][j]])
npoint = (cpoint.x,cpoint.y)
each_wire_inter_point_storage[i[0]].append(npoint)
if i[0] > 1:
wire_touch_label_list[i[0]].append(label)
label += 1
else:
wire_touch_label_list[i[0]].append(i[0])
maxl = label # dimension of the resistance matrix
                    # flattening intersection_index for counting the number of occurrences of wire i
flat = list(sum(intersection_index, ()))
conn_per_wire = Counter(flat)
# checking for isolated wires
complete_list = range(nwires_plus_leads)
isolated_wires = [x for x in complete_list if not x in flat]
# list containing the length segments of each wire (if it has a junction)
each_wire_length_storage = [[] for _ in range(nwires_plus_leads)]
# Routine that obtains the segment lengths on each wire
for i in each_wire_inter_point_storage:
point_ini = Point(mlines[i].coords[0])
point_fin = Point(mlines[i].coords[1])
wlength = point_ini.distance(point_fin)
wire_points = each_wire_inter_point_storage[i]
dist = [0.0]*(len(wire_points)+1)
for j in range(len(wire_points)):
point = Point(wire_points[j])
dist[j] = point_ini.distance(point)
dist[-1] = wlength
dist.sort()
dist_sep = [0.0]*len(dist)
dist_sep[0] = dist[0]
dist_sep[1:len(dist)] = [dist[k]-dist[k-1] for k in range(1,len(dist))]
each_wire_length_storage[i].append(dist_sep)
# End ---------- MNR nodal mapping -------------
                    # The MNR mapping associated with the NWN is also converted into a mathematical graph G.
                    # G contains 2*nintersections nodes; by convention, the left and right electrodes are labelled as nodes 0 and 1, respectively.
G = nx.Graph()
G.add_nodes_from(range(2*nintersections))
mr_matrix_plus = np.zeros((2*nintersections,2*nintersections))
inner_count = 0
inter_count = 0
#nx.draw(G)
#nx.draw_random(G)
#nx.draw_circular(G)
nx.draw_spectral(G, node_size= 10)
##nx.draw_networkx_nodes(G)
plt.show()
# Start ---------- Building resistance matrix -------------
for iwire in range(nwires_plus_leads):
if each_wire_inter_point_storage[iwire]:
for j, pointj in enumerate(each_wire_inter_point_storage[iwire]):
point = Point(pointj)
for i, pointw in enumerate(each_wire_inter_point_storage[iwire]):
comp_pointw = Point(pointw)
inter_dist = point.distance(comp_pointw)
round_inter_dist = round(inter_dist, 4)
for il in each_wire_length_storage[iwire][0]:
value = float(il)
value = round(value,4)
if value == round_inter_dist and value != 0:
inner_resis = (float(value) * rho0 / A0)
if iwire != 0 and iwire != 1 and mr_matrix_plus[wire_touch_label_list[iwire][i], wire_touch_label_list[iwire][j]] == 0.0:
mr_matrix_plus[wire_touch_label_list[iwire][i], wire_touch_label_list[iwire][j]] = -1.0/inner_resis
mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[iwire][i]] = -1.0/inner_resis
G.add_edge(wire_touch_label_list[iwire][i],wire_touch_label_list[iwire][j])
inner_count += 1
for k, label in enumerate(wire_touch_list[iwire]):
for kk, pointk in enumerate(each_wire_inter_point_storage[label]):
pointk = Point(pointk)
inter_dist = point.distance(pointk)
round_inter_dist = round(inter_dist, 4)
if round_inter_dist == 0 and mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[label][kk]] == 0:
G.add_edge(wire_touch_label_list[label][kk],wire_touch_label_list[iwire][j])
r0 = -1/R_junc
mr_matrix_plus[wire_touch_label_list[iwire][j], wire_touch_label_list[label][kk]] = r0
mr_matrix_plus[wire_touch_label_list[label][kk], wire_touch_label_list[iwire][j]] = r0
sum_rows_mr_plus = mr_matrix_plus.sum(1)
np.fill_diagonal(mr_matrix_plus, abs(sum_rows_mr_plus))
mr_nozero_rows_plus = mr_matrix_plus[~(mr_matrix_plus==0).all(1),:]
# nonconnected wires are eliminated from the resistance matrix
mr_nonconnected_plus = mr_nozero_rows_plus[:,~(mr_nozero_rows_plus==0).all(0)]
# End ---------- Building resistance matrix -------------
# input current vector
i0 = 1.0 # absolute value of the current (in Amp)
ic = np.zeros(mr_nonconnected_plus.shape[0])
ic[0] = +i0
ic[1] = -i0
Imatrix = m(ic)
# Solving Ohm's law in matrix form, R^(-1)V = I. Resulting voltages are in Volts.
#Amatrix = m(mr_nonconnected_plus)
#Amatrix = np.array(mr_nonconnected_plus)
#ks = Symmlq(Imatrix)
#elec_pot_mr = ks.solve(Gfun)
#print Gfun
#print Imatrix
#or
#ks = Symmlq(Gfun)
#print Amatrix
#elec_pot_mr = ks.solve(Imatrix)
Amatrix = csc_matrix(mr_nonconnected_plus)
elec_pot_mr = minres(Amatrix, Imatrix, tol=tol)
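# Note: scipy.sparse.linalg.minres returns a (solution_vector, exit_code)
# tuple, so elec_pot_mr[0] below holds the node potentials and an exit_code
# of 0 means the solver converged.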
#elec_pot_mr = Symmlq(Imatrix, Gfun, show=show, rtol=tol, maxit=maxit)
#elec_pot_mr = minres(Imatrix, Amatrix)
# Sheet resistance
resistance = ((elec_pot_mr[0][0] - elec_pot_mr[0][1]))/i0
# Checking if there is a path connecting electrodes at nodes 0 and 1
if nx.has_path(G,0,1):
separation_short = nx.shortest_path_length(G,0,1)
res_list.append(resistance)
short_sep_list.append(separation_short)
junc_dens.append(float(nintersections)/area)
except IndexError:
continue
break
AF = density*wire_diameter*wire_length*0.001
transmittance = round(math.exp(-AF*extinction_coeff), 4)
junc_avg = np.mean(junc_dens)
resAvg = np.mean(res_list)
resStd = np.std(res_list)
short = np.mean(short_sep_list)
dens_temp.append(junc_avg)
avg_res_temp.append(resAvg)
st_dev_temp.append(resStd)
open(res_file,"a").write("%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n" %(density,AF,transmittance,resAvg,resStd,junc_avg,R_junc,rho0,wire_diameter,wire_length,box_length,samples,nstep,n_initial,n_final,tol,distl,lower_l,upper_l,sigmal,percentage_chance,round(time.process_time() - start_time, 5)))
print("Density: %s, Transmittance: %s, Average resistance: %s, Standard deviation: %s, Junction density: %s, Junctions removed: %s" %(density,transmittance,round(resAvg, 6),round(resStd, 4),round(junc_avg, 4), percentage_chance))
print("runtime was", round(time.process_time() - start_time, 5), "seconds")
transmittancelist.append((transmittance**(-1/2))-1)
resistancelist.append(resAvg)
res_list=[]
short_sep_list=[]
junc_dens=[]
print(transmittancelist)
print(resistancelist)
#plot T^(-1/2)-1 vs Rs
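# Background note (assumption about the intent of this plot): for transparent
# conductors the bulk figure-of-merit relation T = (1 + Z0*sigma_op/(2*Rs*sigma_dc))**-2,
# with Z0 ~ 377 ohm the impedance of free space, implies that T**(-1/2) - 1 is
# proportional to 1/Rs, so the log-log plot below is expected to be roughly linear.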
import matplotlib.pyplot as plt
plt.plot(resistancelist,transmittancelist,'bo')
plt.title('T^(-1/2)-1 vs Rs Log-Log Scale')
plt.ylabel('T^(-1/2)-1')
plt.xlabel('Rs')
plt.yscale('log')
plt.xscale('log')
plt.show()
#line fit of loglog plot
xs = np.log10(np.array(resistancelist, dtype=np.float64))
ys = np.log10(np.array(transmittancelist, dtype=np.float64))
def best_fit_slope_and_intercept(xs,ys):
m = (((mean(xs)*mean(ys)) - mean(xs*ys)) /
((mean(xs)*mean(xs)) - mean(xs*xs)))
b = mean(ys) - m*mean(xs)
return m, b
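# The closed form above is the ordinary least-squares fit,
# m = (E[x]E[y] - E[xy]) / (E[x]^2 - E[x^2]) and b = E[y] - m*E[x];
# np.polyfit(xs, ys, 1) would return the same (m, b) pair.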
m, b = best_fit_slope_and_intercept(xs,ys)
print(m,b)
#plot best fit line on graph
regression_line = [(m*x)+b for x in xs]
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.patches as mpatches
style.use('ggplot')
plt.plot(xs,ys,'bo')
plt.plot(xs, regression_line,'r-')
plt.title('T^(-1/2)-1 vs Rs Log-Log Scale with Line Fit')
plt.ylabel('T^(-1/2)-1')
plt.xlabel('Rs')
red_patch = mpatches.Patch(color='red', label='Line Fit y = %s x + %s' %(round(m,3),round(b,3)))
plt.legend(handles=[red_patch])
plt.show()
open(res_file,"a").close()
duration = 0.1
freq = 1100
| gpl-2.0 |
regardscitoyens/Collaborateurs-Senat | bin/convert.py | 1 | 8575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, re, json
filepath = sys.argv[1]
with open(filepath, 'r') as xml_file:
xml = xml_file.read()
drawMap = False
if len(sys.argv) > 2:
drawMap = True
minl = 100
mint = 200
maxt = 1100
l1 = 300
parls_type = "senateurs"
parls_first = True
if "senateurs_collaborateurs" in filepath:
mint = 220
elif "collaborateurs_senateurs" in filepath:
parls_first = False
elif "deputes_collaborateurs" in filepath:
parls_type = "deputes"
minl = 50
mint = 220
maxt = 800
l1 = 75
l2 = 150
l3 = 250
l4 = 275
l5 = 350
parl_type = parls_type.rstrip("s")
with open("data/%s.json" % parls_type, 'r') as f:
parls = [p[parl_type] for p in json.load(f)[parls_type]]
Mme = r"^M[.mle]+\s+"
clean_Mme = re.compile(Mme)
re_clean_bal = re.compile(r'<[^>]+>')
re_clean_dash = re.compile(r'\s*-\s*')
re_clean_spaces = re.compile(r'\s+')
clean = lambda x: re_clean_spaces.sub(' ', re_clean_dash.sub('-', re_clean_bal.sub('', x))).strip()
regexps = [(re.compile(r), s) for r, s in [
(u'[àÀâÂ]', 'a'),
(u'[éÉèÈêÊëË]', 'e'),
(u'[îÎïÏ]', 'i'),
(u'[ôÔöÔ]', 'o'),
(u'[ùÙûÛüÜ]', 'u'),
(u'[çÇ]', 'c'),
]]
def clean_accents(t):
if not isinstance(t, unicode):
t = t.decode('utf-8')
for r, s in regexps:
t = r.sub(s, t)
return t
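# e.g. clean_accents("Sénat") returns u"Senat"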
checker = lambda x: clean(clean_accents(x)).lower().strip()
reorder = lambda p: checker("%s %s" % (p['nom'].replace("%s " % p['prenom'], ""), p['prenom']))
maj = ur"A-ZÀÂÉÈÊËÎÏÔÖÙÛÜÇ"
particule = r"d(?:(?:e l)?'|[iu] |e(?:[ls]| la)? )"
re_name = re.compile(ur"(%s)((?:(?:%s)?[%s'\-]+\s+)+)([%s][a-zàâéèêëîïôöùûüç].*)$" % (Mme, particule, maj, maj))
def split_name(name):
match = re_name.search(name.decode('utf-8'))
if not match:
sys.stderr.write("WARNING: could not split name %s\n" % name)
return name, "", ""
sexe = "H" if "." in match.group(1) else "F"
nom = match.group(2).strip()
prenom = match.group(3).strip()
return nom, prenom, sexe
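# Illustrative example (hypothetical name): split_name("M. DUPONT Jean")
# would return (u"DUPONT", u"Jean", "H"); an "Mme" prefix yields sexe "F".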
def split_collab(record):
if record[4].endswith("*"):
record[4] = record[4].rstrip(" *")
record[10] = "Congé sans solde"
else: record[10] = ""
record[5], record[6], record[7] = split_name(record[4])
exceptions = [
("deromedi jacqueline", "deromedi jacky"),
("yonnet-salvator evelyne", "yonnet evelyne"),
("laufoaulu lopeleto", "laufoaulu robert"),
("azerot bruno", "azerot bruno nestor"),
("de la verpillere charles", "de la verpilliere charles"),
("debre bernard andre", "debre bernard"),
("le borgn pierre-yves", "le borgn' pierre-yves"),
("vlody jean-jacques", "vlody jean jacques"),
("destans jean louis", "destans jean-louis"),
("goasgen claude", "goasguen claude"),
("zimmermann marie jo", "zimmermann marie-jo")
]
def find_parl(record, splitted=False):
nom = checker(clean_Mme.sub('', record[0]))
for bad, gd in exceptions:
nom = nom.replace(bad, gd)
if not splitted:
record[1], record[2], record[3] = split_name(record[0])
for parl in parls:
if reorder(parl) == nom:
record[0] = parl["nom"]
#record[1] = parl["nom_de_famille"]
#record[2] = parl["prenom"]
#record[3] = parl["sexe"]
record[8] = parl["url_nos%s_api" % parls_type].replace('json', 'xml')
record[9] = parl.get("url_institution", parl.get("url_an", ""))
return
sys.stderr.write("Could not find %s -> %s\n" % (record[0], nom.encode('utf-8')))
# Reorder xml lines
xml_ordered = ""
page_lines = []
re_ordline = re.compile(r'<text top="(\d+)" left="(\d+)"[^>]', re.I)
def ordline(l):
y, x = re_ordline.search(l).groups()
y = int(y)
x = int(x)
if parls_type == "deputes" and x < l3:
y -= 3
return (y, x)
for line in (xml).split("\n"):
if line.startswith('<text'):
page_lines.append(line)
elif line.startswith('</page'):
xml_ordered += "\n".join(sorted(page_lines, key=ordline))
page_lines = []
topvals = {}
leftvals = {}
maxtop = 0
maxleft = 0
results = []
headers = ['parlementaire', 'nom_parlementaire', 'prénom_parlementaire', 'sexe_parlementaire', 'collaborateur', 'nom_collaborateur', 'prénom_collaborateur', 'sexe_collaborateur', 'url_api_RC', 'url_institution', 'information complémentaire']
record = ["", "", "", "", "", "", "", "", "", "", ""]
re_line = re.compile(r'<page number|text top="(\d+)" left="(\d+)"[^>]*font="(\d+)">(.*)</text>', re.I)
re_tosplit = re.compile(r'^(.*) ((?:M.|Mme) .*)$')
re_collabtosplit = re.compile(r'^\s*(M\.|Mme) (.+)$')
sexize = lambda val: "H" if val == "M." else "F"
for line in (xml_ordered).split("\n"):
#print >> sys.stderr, "DEBUG %s" % line
line = line.replace(" ", " ")
attrs = re_line.search(line)
if not attrs or not attrs.groups():
raise Exception("WARNING : line detected with good font but wrong format %s" % line)
font = int(attrs.group(3))
top = int(attrs.group(1))
if top > maxtop:
maxtop = top
if not font in topvals:
topvals[font] = []
topvals[font].append(top)
left = int(attrs.group(2))
if left > maxleft:
maxleft = left
if not font in leftvals:
leftvals[font] = []
leftvals[font].append(left)
if drawMap:
continue
#print "DEBUG %s %s %s %s" % (font, left, top, text)
if top < mint or top > maxt:
continue
if left < minl:
continue
    text = attrs.group(4).replace("&amp;", "&")
# Handle députés
if parls_type == "deputes":
val = clean(text)
if left < l1:
record[3] = val.replace(" .", ".")
elif left < l2:
record[2] = val
elif left < l3:
record[1] = val
record[0] = record[3] + " " + record[1] + " " + record[2]
find_parl(record, splitted=True)
record[3] = sexize(record[3])
elif left < l4:
splitted = re_collabtosplit.search(val)
if splitted:
record[7] = splitted.group(1)
record[6] = splitted.group(2)
else:
record[7] = val.replace(" .", ".")
elif left < l5:
record[6] = val
else:
record[5] = val
record[4] = record[7] + " " + record[5] + " " + record[6]
record[7] = sexize(record[7])
results.append(list(record))
continue
# Handle sénateurs
if left < l1:
val = clean(text)
idx = 0 if parls_first else 4
tosplit = re_tosplit.search(val)
if tosplit:
sys.stderr.write("WARNING: splitting %s\n" % val)
idx2 = 4 if parls_first else 0
record[idx], record[idx2] = tosplit.groups()
find_parl(record)
split_collab(record)
results.append(list(record))
continue
record[idx] = val
if parls_first:
find_parl(record)
else:
split_collab(record)
else:
idx = 4 if parls_first else 0
record[idx] = clean(text)
if not parls_first:
find_parl(record)
else:
split_collab(record)
results.append(list(record))
if not drawMap:
print ",".join(['"%s"' % h for h in headers])
for i in sorted(results, key=lambda x: ("%s %s - %s %s" % (x[1], x[2], x[5], x[6])).lower()):
for j in range(len(i)):
i[j] = clean(i[j])
try: i[j] = i[j].encode('utf-8')
except: pass
print ",".join([str(i[a]) if isinstance(i[a], int) else "\"%s\"" % i[a].replace('"', '""') for a,_ in enumerate(i)])
else:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
fig = plt.figure(figsize=(8.5, 12))
ax = fig.add_subplot(111)
ax.grid(True, fillstyle='left')
nf = len(leftvals)
for font in leftvals:
color = cm.jet(1.5*font/nf)
ax.plot(leftvals[font], topvals[font], 'ro', color=color, marker=".")
plt.figtext((font+1.)/(nf+1), 0.95, "font %d" % font, color=color)
plt.xticks(np.arange(0, maxleft + 50, 50))
plt.yticks(np.arange(0, maxtop + 50, 50))
plt.xlim(0, maxleft + 50)
plt.ylim(0, maxtop + 50)
plt.gca().invert_yaxis()
mappath = filepath.replace(".xml", ".png").replace("pdfs/", "pdfmaps/")
fig.savefig(mappath)
fig.clf()
plt.close(fig)
| agpl-3.0 |
sofianehaddad/otmorris | python/src/plot_sensitivity.py | 1 | 3422 | """
Plot Morris elementary effects
"""
import openturns as ot
import numpy as np
import matplotlib
import pylab as plt
import warnings
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
class PlotEE(object):
"""
Plot elementary effects
-----------------------
    The class plots the sensitivity indices obtained from the Morris class.
Parameters
----------
morris: :class:`~otmorris.Morris`
A Morris object.
output_marginal: int
Index of output marginal of interest.
Default value is 0
absolute_mean: bool
        If True, plot the mean of the absolute elementary effects.
Default value is True
title: str
Title for the graph
"""
def __init__(self,
result,
output_marginal=0,
absolute_mean=True,
title="Elementary effects",
**kwargs):
# set figure
self._fig, self._ax = plt.subplots()
# Check input object type
if not (hasattr(result, 'getStandardDeviationElementaryEffects') and hasattr(result, 'getClassName')):
raise TypeError(" `result` should be of class Morris ")
if absolute_mean:
mean = result.getMeanAbsoluteElementaryEffects(output_marginal)
else:
mean = result.getMeanElementaryEffects(output_marginal)
sigma = result.getStandardDeviationElementaryEffects(output_marginal)
dim = len(sigma)
input_description = map(lambda x: "X" + str(x + 1), range(dim))
# Plot effects
self._ax.plot(mean, sigma, 'bo')
# Annotate points
dmu = (plt.np.max(mean) - plt.np.min(mean)) / len(mean)
dsg = (plt.np.max(sigma) - plt.np.min(sigma)) / len(sigma)
for i, txt in enumerate(input_description):
self._ax.annotate(
txt, (mean[i] + 0.05 * dmu, sigma[i] + 0.05 * dsg))
self._ax.set_xlabel(r"$\boldsymbol{\mu}$", fontsize=14)
self._ax.set_ylabel(r"$\boldsymbol{\sigma}$", fontsize=14)
self._ax.legend(loc=0)
self._ax.grid(True)
self._fig.suptitle(title, fontsize=18)
def show(self, **kwargs):
"""
Display the graph on screen.
Parameters
----------
kwargs:
block: bool, optional
If true (default), block until the graph is closed.
These parameters are passed to matplotlib.pyplot.show()
"""
self._fig.show(**kwargs)
def save(self, fname, **kwargs):
"""
Save the graph as file.
Parameters
----------
        fname: str, optional
A string containing a path to a filename from which file format is deduced.
kwargs:
Refer to matplotlib.figure.Figure.savefig documentation for valid keyword arguments.
"""
self._fig.savefig(fname, **kwargs)
def getFigure(self):
"""
Accessor to the underlying figure object.
Refer to matplotlib.figure.Figure for further information.
"""
return self._fig
def getAxes(self):
"""
        Accessor to the underlying Axes object.
Refer to matplotlib.axes.Axes for further information.
"""
return self._ax
def close(self):
"""Close the figure."""
plt.close(self._fig)
| lgpl-3.0 |
mph-/lcapy | lcapy/texpr.py | 1 | 7057 | """This module provides the TimeDomainExpression class to represent
time domain expressions.
Copyright 2014--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import TimeDomain
from .expr import Expr, expr_make
from .functions import exp
from .sym import fsym, ssym, tsym, j, oo
from .laplace import laplace_transform
from .fourier import fourier_transform
from .units import u as uu
from sympy import Heaviside, Integral, limit, Expr as symExpr
__all__ = ('texpr', )
class TimeDomainExpression(TimeDomain, Expr):
"""t-domain expression or symbol."""
var = tsym
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
assumptions['real'] = True
super(TimeDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(ssym) and not expr.has(Integral):
raise ValueError(
't-domain expression %s cannot depend on s' % expr)
if check and expr.has(fsym) and not expr.has(Integral):
raise ValueError(
't-domain expression %s cannot depend on f' % expr)
def _mul_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def _div_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
@property
def abs(self):
"""Return absolute value."""
return self.__class__(abs(self.expr), **self.assumptions)
def as_expr(self):
return TimeDomainExpression(self)
def infer_assumptions(self):
self.assumptions.infer_from_expr(self)
def LT(self, evaluate=True, **assumptions):
"""Determine one-sided Laplace transform with 0- as the lower limit.
This is an alias for laplace."""
return self.laplace(evaluate, **assumptions)
def laplace(self, evaluate=True, **assumptions):
"""Determine one-sided Laplace transform with 0- as the lower limit."""
assumptions = self.assumptions.merge_and_infer(self, **assumptions)
result = laplace_transform(self.expr, self.var, ssym, evaluate=evaluate)
return self.change(result, domain='laplace', units_scale=uu.s, **assumptions)
def phasor(self, **assumptions):
"""Convert to phasor domain."""
from .phasor import PhasorTimeDomainExpression
return PhasorTimeDomainExpression.from_time(self, **assumptions)
def FT(self, var=None, evaluate=True, **assumptions):
"""Attempt Fourier transform.
X(f) = \int_{-\infty}^{\infty} x(t) exp(-j 2\pi f t) dt."""
from .symbols import f, omega, Omega, F
if var is None:
var = f
if id(var) not in (id(f), id(F), id(omega), id(Omega)):
            raise ValueError('FT requires var to be f, F, omega, or Omega, not %s' % var)
assumptions = self.assumptions.merge_and_infer(self, **assumptions)
result = fourier_transform(self.expr, self.var, fsym, evaluate=evaluate)
result = self.change(result, domain='fourier', units_scale=uu.s, **assumptions)
result = result(var)
result = result.expand(diracdelta=True, wrt=var)
result = result.simplify()
return result
def fourier(self, var=None, evaluate=True, **assumptions):
"""Attempt Fourier transform. This is an alias for FT."""
return self.FT(var, evaluate, **assumptions)
def angular_fourier(self, evaluate=True, **assumptions):
"""Attempt angular Fourier transform."""
from .symbols import omega
return self.FT(omega, evaluate, **assumptions)
def norm_angular_fourier(self, evaluate=True, **assumptions):
"""Attempt normalized angular Fourier transform."""
from .symbols import Omega
return self.FT(Omega, evaluate, **assumptions)
def time(self, **assumptions):
return self
def plot(self, t=None, **kwargs):
"""Plot the time waveform. If t is not specified, it defaults to the
range (-0.2, 2). t can be a vector of specified instants, a
tuple specifing the range, or a constant specifying the
maximum value with the minimum value set to 0.
kwargs include:
axes - the plot axes to use otherwise a new figure is created
xlabel - the x-axis label
ylabel - the y-axis label
xscale - the x-axis scaling, say for plotting as ms
yscale - the y-axis scaling, say for plotting mV
in addition to those supported by the matplotlib plot command.
The plot axes are returned."""
from .plot import plot_time
return plot_time(self, t, **kwargs)
def sample(self, t):
"""Return a discrete-time signal evaluated at time values specified by
vector t. """
return self.evaluate(t)
def initial_value(self):
"""Determine value at t = 0.
See also pre_initial_value and post_initial_value"""
return self.subs(0)
def pre_initial_value(self):
"""Determine value at t = 0-.
See also initial_value and post_initial_value"""
return self.limit(self.var, 0, dir='-')
def post_initial_value(self):
"""Determine value at t = 0+.
See also pre_initial_value and initial_value"""
return self.limit(self.var, 0, dir='+')
def final_value(self):
"""Determine value at t = oo."""
return self.limit(self.var, oo)
def remove_condition(self):
"""Remove the piecewise condition from the expression.
See also force_causal."""
if not self.is_conditional:
return self
expr = self.expr
expr = expr.args[0].args[0]
return self.__class__(expr)
def force_causal(self):
"""Remove the piecewise condition from the expression
and multiply by Heaviside function. See also remove_condition."""
if self.is_causal:
return self
expr = self.expr
if self.is_conditional:
expr = expr.args[0].args[0]
expr = expr * Heaviside(t)
return self.__class__(expr)
class TimeDomainImpulseResponse(TimeDomainExpression):
"""Time-domain impulse response."""
# TODO, check attributes.
quantity = 'transfer'
quantity_label = 'Impulse response'
domain_units = uu.Hz
is_transfer = True
def texpr(arg, **assumptions):
"""Create TimeDomainExpression object. If `arg` is tsym return t"""
if arg is tsym:
return t
return expr_make('time', arg, **assumptions)
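# Illustrative usage (sketch): texpr('exp(-3 * t) * Heaviside(t)') builds a
# causal time-domain expression whose .laplace() transform is 1 / (s + 3).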
from .expressionclasses import expressionclasses
classes = expressionclasses.register('time', TimeDomainExpression)
TimeDomainVoltage = classes['voltage']
TimeDomainCurrent = classes['current']
t = TimeDomainExpression('t')
t.units = uu.s
| lgpl-2.1 |
mlyundin/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
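# inverse_transform assigns each cluster's pooled value (the mean, by default)
# back to every original pixel in that cluster, so the restored images show
# the 32 per-cluster averages.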
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
imaculate/scikit-learn | doc/conf.py | 5 | 8468 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2016, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
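# With this template a resolved source link has the form, e.g.,
# https://github.com/scikit-learn/scikit-learn/blob/<revision>/sklearn/<path>#L<lineno>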
| bsd-3-clause |
cuauv/software | hydrocode/scripts/dft_plot.py | 1 | 3121 | #!/usr/bin/env python3
#Script for drawing plots of the correct DFT bin amplitude and ratio between successive amplitudes with respect to time. Also shows trigger point. Read Hydrophones Code wiki entry.
import socket, struct, numpy, math
import matplotlib.pyplot as plt
import os.path
try:
from cv2 import imread
except ImportError:
from scipy.misc import imread
DFT_PLOT_LENGTH = 3500 #length of the dft plot (in samples)
MAXIMUM_AMPLITUDE = DFT_PLOT_LENGTH #maximum signal amplitude (plot is square because the penguin meme is a square image)
UDP_ADDRESS = "127.0.0.1" #local host because we receive plots from hydromathd on the same machine
UDP_PAYLOAD_SIZE = 512 #size of the UDP plot packets (in bytes)
UDP_PORT = 9003 #hydromathd sends dft plots to this port
#initializing UDP networking
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_ADDRESS, UDP_PORT))
#initializing the plot window
fig = plt.figure(figsize = (7, 7))
#setting the plot labels
plt.title("Correct DFT bin Amplitudes and Amplitude Ratios w/ Trigger Point")
plt.xlabel("Packet Number")
plt.ylabel("Amplitudes White, Ratios Blue")
#displaying the penguin meme background
img = imread(os.path.join(os.path.dirname(os.path.realpath(__file__)), "katy.jpg"))
plt.imshow(img, extent = [0, DFT_PLOT_LENGTH - 1, 0, MAXIMUM_AMPLITUDE - 1])
#creating the axes and setting the maximum values. removing axis ticks
ax = plt.gca()
ax.set_xlim((0, DFT_PLOT_LENGTH - 1))
ax.set_ylim((0, MAXIMUM_AMPLITUDE - 1))
ax.axes.yaxis.set_ticks([])
#initializing the graphs and trigger cursor with arbitrary numbers
x = numpy.arange(0, DFT_PLOT_LENGTH)
y = x
trigger_cursor = plt.axvline(x = 0, color = "yellow", ymin = 0.95)
(line_0, line_1) = ax.plot(x, y, 'w-', x, y, 'b-', linewidth = '0.2', marker = '.', markersize = '2') #'w-' for white and 'b-' for blue
#preparing decode strings for unpacking the received byte arrays into values. we need 'f' for floats. the trigger point array contains a single value
decode_string = str(DFT_PLOT_LENGTH) + 'f'
trigger_decode_string = str(1) + 'f'
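# e.g. with DFT_PLOT_LENGTH == 3500 the decode string is "3500f", so each plot
# is unpacked from 3500 * 4 bytes of native-endian floats; the trigger point
# is a single float ("1f")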
while 1:
#initializing the graph values list
line_values = list()
#amplitudes are received first, then ratios
for i in range(2):
data = bytes()
#receiving the full packets and the final possibly partially filled packet
for j in range(int(math.ceil(float(DFT_PLOT_LENGTH) * 4.0 / UDP_PAYLOAD_SIZE))):
(data_packet, address) = sock.recvfrom(UDP_PAYLOAD_SIZE * 4)
data += data_packet
#unpacking the received bytes array into values
line_values.append(numpy.asarray(struct.unpack(decode_string, data)))
#receiving the trigger point number
(data_packet, address) = sock.recvfrom(1 * 4)
#unpacking the trigger point number
trigger_point = struct.unpack(trigger_decode_string, data_packet)[0]
print("received dft amplitude plot")
#updating the graphs
line_0.set_ydata(line_values[0]) #amplitudes
line_1.set_ydata(line_values[1]) #ratios
trigger_cursor.set_xdata(trigger_point) #trigger packet number
#not pausing after drawing a plot breaks things for some reason
plt.draw()
plt.pause(0.1) #in seconds
| bsd-3-clause |
rew4332/tensorflow | tensorflow/examples/skflow/out_of_core_data_classification.py | 5 | 2461 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of loading karge data sets into out-of-core dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
# pylint: disable=g-bad-import-order
import dask.dataframe as dd
import pandas as pd
from tensorflow.contrib import learn
# pylint: enable=g-bad-import-order
# Sometimes when your dataset is too large to hold in memory,
# you may want to load it into an out-of-core dataframe as provided by the
# dask library, first draw sample batches from it and then load those into
# memory for training.
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we use iris here just for demo purposes
# You can load your own large dataset into an out-of-core dataframe
# using dask's methods, e.g. read_csv() in dask
# details please see: http://dask.pydata.org/en/latest/dataframe.html
# We first load them into pandas dataframes and then convert them into dask
# dataframes.
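# For a dataset that really is larger than memory one would typically start
# from something like dd.read_csv('large-dataset-*.csv') (hypothetical path),
# which builds the dask dataframe lazily from many CSV partitions.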
x_train, y_train, x_test, y_test = [
pd.DataFrame(data) for data in [x_train, y_train, x_test, y_test]]
x_train, y_train, x_test, y_test = [
dd.from_pandas(data, npartitions=2)
for data in [x_train, y_train, x_test, y_test]]
# Initialize a TensorFlow linear classifier
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3)
# Fit the model using training set.
classifier.fit(x_train, y_train)
# Make predictions on each partitions of testing data
predictions = x_test.map_partitions(classifier.predict).compute()
# Calculate accuracy
score = metrics.accuracy_score(y_test.compute(), predictions)
| apache-2.0 |
MohammedWasim/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
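    # Note: the step sizes in fit() follow Crammer et al. (2006):
    # min(C, loss / ||x||^2) is the PA-I update used for the "hinge" and
    # "epsilon_insensitive" losses, while loss / (||x||^2 + 1/(2C)) is the
    # PA-II update used for the squared variants.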
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
MPTCP-smartphone-thesis/pcap-measurement | summary_imc.py | 1 | 106944 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Matthieu Baerts & Quentin De Coninck
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# To install on this machine: matplotlib, numpy
from __future__ import print_function
##################################################
## IMPORTS ##
##################################################
import argparse
import common as co
from math import ceil
import matplotlib
# Do not use any X11 backend
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import mptcp
import numpy as np
import os
import os.path
import pickle
import sys
import tcp
import time
##################################################
## ARGUMENTS ##
##################################################
parser = argparse.ArgumentParser(
description="Summarize stat files generated by analyze")
parser.add_argument("-s",
"--stat", help="directory where the stat files are stored", default=co.DEF_STAT_DIR+'_'+co.DEF_IFACE)
parser.add_argument('-S',
"--sums", help="directory where the summary graphs will be stored", default=co.DEF_SUMS_DIR+'_'+co.DEF_IFACE)
parser.add_argument("-d",
"--dirs", help="list of directories to aggregate", nargs="+")
parser.add_argument("-r",
"--remove", help="if set, remove outliers from dataset", action="store_true")
args = parser.parse_args()
stat_dir_exp = os.path.abspath(os.path.expanduser(args.stat))
sums_dir_exp = os.path.abspath(os.path.expanduser(args.sums))
co.check_directory_exists(sums_dir_exp)
##################################################
## GET THE DATA ##
##################################################
def check_in_list(dirpath, dirs):
""" Check if dirpath is one of the dir in dirs, True if dirs is empty """
if not dirs:
return True
return os.path.basename(dirpath) in dirs
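# e.g. check_in_list('/traces/run1', ['run1', 'run2']) -> True (basename match),
# and check_in_list('/traces/run3', []) -> True (an empty dirs keeps everything)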
def fetch_data(dir_exp):
co.check_directory_exists(dir_exp)
dico = {}
for dirpath, dirnames, filenames in os.walk(dir_exp):
if check_in_list(dirpath, args.dirs):
for fname in filenames:
try:
stat_file = open(os.path.join(dirpath, fname), 'r')
dico[fname] = pickle.load(stat_file)
stat_file.close()
except IOError as e:
print(str(e) + ': skip stat file ' + fname, file=sys.stderr)
return dico
connections = fetch_data(stat_dir_exp)
def ensures_smartphone_to_proxy():
for fname in connections.keys():
for conn_id in connections[fname].keys():
if isinstance(connections[fname][conn_id], mptcp.MPTCPConnection):
inside = True
for flow_id, flow in connections[fname][conn_id].flows.iteritems():
if not flow.attr[co.DADDR].startswith('172.17.') and not flow.attr[co.DADDR] == co.IP_PROXY:
connections[fname].pop(conn_id, None)
inside = False
break
if inside:
for direction in co.DIRECTIONS:
# This is a fix for wrapping seq num
if connections[fname][conn_id].attr[direction][co.BYTES_MPTCPTRACE] < -1:
connections[fname][conn_id].attr[direction][co.BYTES_MPTCPTRACE] = 2**32 + connections[fname][conn_id].attr[direction][co.BYTES_MPTCPTRACE]
ensures_smartphone_to_proxy()
def get_multiflow_connections(connections):
multiflow_connections = {}
singleflow_connections = {}
for fname, conns_fname in connections.iteritems():
for conn_id, conn in conns_fname.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
if len(conn.flows) > 1:
if fname not in multiflow_connections:
multiflow_connections[fname] = {}
multiflow_connections[fname][conn_id] = conn
else:
if fname not in singleflow_connections:
singleflow_connections[fname] = {}
singleflow_connections[fname][conn_id] = conn
return multiflow_connections, singleflow_connections
multiflow_connections, singleflow_connections = get_multiflow_connections(connections)
def filter_connections(connections, min_bytes=None, max_bytes=None):
filtered = {}
for fname, data in connections.iteritems():
filtered[fname] = {}
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
mptcp_bytes = conn.attr[co.S2D].get(co.BYTES_MPTCPTRACE, 0) + conn.attr[co.D2S].get(co.BYTES_MPTCPTRACE, 0)
if (min_bytes and mptcp_bytes >= min_bytes) or (max_bytes and mptcp_bytes <= max_bytes):
filtered[fname][conn_id] = conn
return filtered
# connections = filter_connections(connections)
##################################################
## PLOTTING RESULTS ##
##################################################
def fog_plot_with_bytes_wifi_cell_per_condition(log_file=sys.stdout):
data = {co.S2D: {'all': {'Connections': []}}, co.D2S: {'all': {'Connections': []}}}
color = {'Connections': 'orange'}
base_graph_name = "fog_bytes"
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
if co.BYTES in conn.attr[co.S2D]:
data[co.S2D]['all']['Connections'].append([conn.attr[co.S2D][co.BYTES].get(co.WIFI, 0), conn.attr[co.S2D][co.BYTES].get(co.CELL, 0)])
if co.BYTES in conn.attr[co.D2S]:
data[co.D2S]['all']['Connections'].append([conn.attr[co.D2S][co.BYTES].get(co.WIFI, 0), conn.attr[co.D2S][co.BYTES].get(co.CELL, 0)])
co.scatter_plot_with_direction(data, "Bytes on Wi-Fi", "Bytes on cellular", color, sums_dir_exp, base_graph_name)
def fog_plot_with_packs_wifi_cell_per_condition(log_file=sys.stdout):
data = {co.S2D: {'all': {'Connections': []}}, co.D2S: {'all': {'Connections': []}}}
color = {'Connections': 'orange'}
base_graph_name = "fog_packs"
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
            # conn should be an MPTCPConnection here, but check to be sure
if isinstance(conn, mptcp.MPTCPConnection):
packs = {co.S2D: {co.CELL: 0, co.WIFI: 0, '?': 0}, co.D2S: {co.CELL: 0, co.WIFI: 0, '?': 0}}
for flow_id, flow in conn.flows.iteritems():
if co.S2D not in flow.attr:
continue
if co.PACKS not in flow.attr[co.S2D] or co.PACKS not in flow.attr[co.D2S]:
break
interface = flow.attr[co.IF]
packs[co.S2D][interface] += flow.attr[co.S2D][co.PACKS]
packs[co.D2S][interface] += flow.attr[co.D2S][co.PACKS]
if packs[co.S2D][co.CELL] == 0 and packs[co.S2D][co.WIFI] == 0 and packs[co.D2S][co.CELL] == 0 and packs[co.D2S][co.WIFI] == 0:
continue
data[co.S2D]['all']['Connections'].append([packs[co.S2D][co.WIFI], packs[co.S2D][co.CELL]])
data[co.D2S]['all']['Connections'].append([packs[co.D2S][co.WIFI], packs[co.D2S][co.CELL]])
co.scatter_plot_with_direction(data, "Packets on wifi", "Packets on cellular", color, sums_dir_exp, base_graph_name)
def fog_duration_bytes(log_file=sys.stdout):
data = {'all': {'Connections': []}}
color = {'Connections': 'orange'}
base_graph_name = "fog_duration_bytes"
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
if isinstance(conn, tcp.TCPConnection):
duration = conn.flow.attr[co.DURATION]
elif isinstance(conn, mptcp.MPTCPConnection):
duration = conn.attr[co.DURATION]
nb_bytes = 0
if co.BYTES in conn.attr[co.S2D]:
nb_bytes = conn.attr[co.S2D][co.BYTES].get(co.WIFI, 0) + conn.attr[co.S2D][co.BYTES].get(co.CELL, 0) + conn.attr[co.S2D][co.BYTES].get('?', 0)
if co.BYTES in conn.attr[co.D2S]:
nb_bytes += conn.attr[co.D2S][co.BYTES].get(co.WIFI, 0) + conn.attr[co.D2S][co.BYTES].get(co.CELL, 0) + conn.attr[co.D2S][co.BYTES].get('?', 0)
data['all']['Connections'].append([duration, nb_bytes])
co.scatter_plot(data, "Duration [s]", "Bytes on connection", color, sums_dir_exp, base_graph_name, plot_identity=False)
def cdf_duration(log_file=sys.stdout):
data_duration = {'all': {co.DURATION: []}}
color = ['red']
base_graph_name_duration = "summary_cdf_duration"
base_graph_path_duration = os.path.join(sums_dir_exp, base_graph_name_duration)
base_graph_name_duration_hist = "summary_hist_duration"
base_graph_path_duration_hist = os.path.join(sums_dir_exp, base_graph_name_duration_hist)
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
if isinstance(conn, tcp.TCPConnection):
duration = conn.flow.attr[co.DURATION]
elif isinstance(conn, mptcp.MPTCPConnection):
duration = conn.attr[co.DURATION]
data_duration['all'][co.DURATION].append(duration)
co.plot_cdfs_natural(data_duration, color, 'Seconds [s]', base_graph_path_duration)
co.plot_cdfs_natural(data_duration, color, 'Seconds [s]', base_graph_path_duration + '_log', xlog=True)
# weights = []
# for dataset_results in data_duration['all'][co.DURATION]:
# weights.append(np.ones_like(dataset_results) / len(data_duration['all'][co.DURATION]))
plt.figure()
plt.hist(data_duration['all'][co.DURATION], bins=np.logspace(-3, 5, 81), log=True)
plt.xlabel("Duration of connections [s]", fontsize=18)
plt.ylabel("Connections", fontsize=18)
plt.gca().set_xscale("log")
plt.savefig(base_graph_path_duration_hist + "_log.pdf")
plt.close()
plt.figure()
plt.hist(data_duration['all'][co.DURATION], bins=np.logspace(-3, 5, 81))
plt.xlabel("Duration of connections [s]", fontsize=18)
plt.ylabel("Connections", fontsize=18)
plt.gca().set_xscale("log")
plt.savefig(base_graph_path_duration_hist + ".pdf")
plt.close()
print("50th percentile", np.percentile(data_duration['all'][co.DURATION], 50), file=log_file)
print("60th percentile", np.percentile(data_duration['all'][co.DURATION], 60), file=log_file)
print("70th percentile", np.percentile(data_duration['all'][co.DURATION], 70), file=log_file)
def cdfs_bytes(log_file=sys.stdout):
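    # CDFs of the bytes carried per connection (Wi-Fi + cellular), aggregated over
    # both directions and split by direction.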
data_bytes = {'all': {co.BYTES: []}}
data_bytes_with_dir = {co.S2D: {'all': {co.BYTES: []}}, co.D2S: {'all': {co.BYTES: []}}}
color = ['red']
base_graph_name_bytes = "summary_cdf_bytes"
base_graph_path_bytes = os.path.join(sums_dir_exp, base_graph_name_bytes)
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
            # An alternative version could use the bytes returned by mptcptrace; it would then be:
# nb_bytes_s2d = conn.attr[co.S2D][co.BYTES_MPTCPTRACE]
# nb_bytes_d2s = conn.attr[co.D2S][co.BYTES_MPTCPTRACE]
            nb_bytes_s2d = 0
            nb_bytes_d2s = 0
            if co.BYTES in conn.attr[co.S2D]:
nb_bytes_s2d = conn.attr[co.S2D][co.BYTES].get(co.WIFI, 0) + conn.attr[co.S2D][co.BYTES].get(co.CELL, 0)
if co.BYTES in conn.attr[co.D2S]:
nb_bytes_d2s = conn.attr[co.D2S][co.BYTES].get(co.WIFI, 0) + conn.attr[co.D2S][co.BYTES].get(co.CELL, 0)
data_bytes['all'][co.BYTES].append(nb_bytes_s2d + nb_bytes_d2s)
data_bytes_with_dir[co.S2D]['all'][co.BYTES].append(nb_bytes_s2d)
data_bytes_with_dir[co.D2S]['all'][co.BYTES].append(nb_bytes_d2s)
co.plot_cdfs_natural(data_bytes, color, 'Bytes', base_graph_path_bytes)
co.plot_cdfs_with_direction(data_bytes_with_dir, color, 'Bytes', base_graph_path_bytes, natural=True)
def cdf_number_subflows(log_file=sys.stdout):
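    # CDF of the number of subflows per MPTCP connection, plus a tally of how many
    # connections have each subflow count (printed to log_file).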
subflows = {'all': {'Subflows': []}}
nb_subflows = {}
color = ['red']
base_graph_name_subflows = "cdf_number_subflows"
base_graph_path_subflows = os.path.join(sums_dir_exp, base_graph_name_subflows)
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
            # Make sure we have MPTCPConnections; this should always be the case
if isinstance(conn, mptcp.MPTCPConnection):
subflows['all']['Subflows'].append(len(conn.flows))
if len(conn.flows) not in nb_subflows:
nb_subflows[len(conn.flows)] = 1
else:
nb_subflows[len(conn.flows)] += 1
elif isinstance(conn, tcp.TCPConnection):
print("WARNING: there is a TCPConnection!")
co.plot_cdfs_natural(subflows, color, '# of subflows', base_graph_path_subflows)
print(nb_subflows, file=log_file)
def count_unused_subflows(log_file=sys.stdout):
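    # Count subflows carrying no data bytes in either direction, both over all
    # connections and within multiflow connections (additional subflows, and the
    # unused ones that also have the best average D2S RTT); byte and duration
    # statistics of the connections involved are printed to log_file.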
count = 0
count_total = 0
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# Still make sure it's MPTCPConnections
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
unused_subflow = True
for direction in co.DIRECTIONS:
# Count data bytes
if flow.attr[direction].get(co.BYTES_DATA, 0) > 0:
unused_subflow = False
count_total += 1
if unused_subflow:
count += 1
count_multiflow = 0
count_unused_multiflow = 0
count_unused_additional = 0
count_unused_best_avg_rtt = 0
count_multiflow_additional = 0
bytes_when_unused = []
duration_when_unused = []
    for fname, conns in multiflow_connections.iteritems():
for conn_id, conn in conns.iteritems():
# Still make sure it's MPTCPConnections
if isinstance(conn, mptcp.MPTCPConnection):
start_time = float('inf')
for flow_id, flow in conn.flows.iteritems():
start_time = min(start_time, flow.attr.get(co.START, float('inf')))
if start_time == float('inf'):
continue
for flow_id, flow in conn.flows.iteritems():
unused_subflow = True
for direction in co.DIRECTIONS:
# Count data bytes
if flow.attr[direction].get(co.BYTES_DATA, 0) > 0:
unused_subflow = False
if unused_subflow:
count_unused_multiflow += 1
count_multiflow += 1
if not start_time == flow.attr.get(co.START, float('inf')):
count_multiflow_additional += 1
if unused_subflow:
count_unused_additional += 1
min_rtt_avg = float('inf')
for fid, fl in conn.flows.iteritems():
if co.RTT_AVG in flow.attr[co.D2S]:
min_rtt_avg = min(min_rtt_avg, fl.attr[co.D2S].get(co.RTT_AVG, float('inf')))
if co.RTT_AVG in flow.attr[co.D2S] and flow.attr[co.D2S][co.RTT_AVG] == min_rtt_avg:
count_unused_best_avg_rtt += 1
bytes_when_unused.append(conn.attr[co.D2S].get(co.BYTES_MPTCPTRACE, 0))
if co.DURATION in conn.attr:
duration_when_unused.append(conn.attr[co.DURATION])
print("Number of unused subflows:", count, file=log_file)
print("Number of total subflows:", count_total, file=log_file)
print("Number of subflows in multiflow connections", count_multiflow, file=log_file)
print("Number of additional subflows in multiflow connections", count_multiflow_additional, file=log_file)
print("Number of unused subflows on multiflow connections", count_unused_multiflow, file=log_file)
print("Number of unused additional subflows on multiflow connections", count_unused_additional, file=log_file)
print("Number of unused additional subflows on multiflow connections with best RTT", count_unused_best_avg_rtt, file=log_file)
print(np.min(bytes_when_unused), np.percentile(bytes_when_unused, 50), np.mean(bytes_when_unused), np.percentile(bytes_when_unused, 75), np.percentile(bytes_when_unused, 90), np.percentile(bytes_when_unused, 95), np.percentile(bytes_when_unused, 96), np.percentile(bytes_when_unused, 97), np.percentile(bytes_when_unused, 98), np.percentile(bytes_when_unused, 99), np.max(bytes_when_unused), file=log_file)
print(np.min(duration_when_unused), np.percentile(duration_when_unused, 50), np.mean(duration_when_unused), np.percentile(duration_when_unused, 75), np.percentile(duration_when_unused, 90), np.percentile(duration_when_unused, 95), np.percentile(duration_when_unused, 96), np.percentile(duration_when_unused, 97), np.percentile(duration_when_unused, 98), np.percentile(duration_when_unused, 99), np.max(duration_when_unused), file=log_file)
def textual_summary(log_file=sys.stdout):
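    # Bucket connections by duration and by total bytes (e.g. <1s, >=1s with <10K or
    # >=10K bytes, 1B/2B, <1K, <10K, >=1M, <10s, >=100s) and print per-bucket
    # connection counts and byte shares, plus the total bytes per direction.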
data = {'all': {'<1s': 0, ">=1s<10K": 0, ">=1s>=10K": 0, "<9s": 0, ">=9s<10K": 0, ">=9s>=10K": 0, '<10K': 0, '<1K': 0, '1B': 0, '2B': 0, '>=100s': 0, '<10s': 0, '>=1M': 0}}
count = {'all': {'<1s': 0, ">=1s<10K": 0, ">=1s>=10K": 0, "<9s": 0, ">=9s<10K": 0, ">=9s>=10K": 0, '<10K': 0, '<1K': 0, '1B': 0, '2B': 0, '>=100s': 0, '<10s': 0, '>=1M': 0}}
tot_count = {'all': 0.0}
total_bytes = {'all': {co.S2D: 0, co.D2S: 0}}
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
if isinstance(conn, tcp.TCPConnection):
duration = conn.flow.attr[co.DURATION]
elif isinstance(conn, mptcp.MPTCPConnection):
duration = conn.attr[co.DURATION]
nb_bytes_s2d = 0
nb_bytes_d2s = 0
            # Use the data bytes reported by mptcptrace; the per-interface alternative is kept commented below
nb_bytes_s2d = conn.attr[co.S2D][co.BYTES_MPTCPTRACE]
nb_bytes_d2s = conn.attr[co.D2S][co.BYTES_MPTCPTRACE]
# if co.BYTES in conn.attr[co.S2D]:
# nb_bytes_s2d = conn.attr[co.S2D][co.BYTES].get(co.WIFI, 0) + conn.attr[co.S2D][co.BYTES].get(co.CELL, 0) + conn.attr[co.S2D][co.BYTES].get('?', 0)
# if co.BYTES in conn.attr[co.D2S]:
# nb_bytes_d2s = conn.attr[co.D2S][co.BYTES].get(co.WIFI, 0) + conn.attr[co.D2S][co.BYTES].get(co.CELL, 0) + conn.attr[co.D2S][co.BYTES].get('?', 0)
if duration < 1:
data['all']['<1s'] += nb_bytes_s2d + nb_bytes_d2s
count['all']['<1s'] += 1
else:
if nb_bytes_s2d + nb_bytes_d2s < 10000:
data['all'][">=1s<10K"] += nb_bytes_s2d + nb_bytes_d2s
count['all'][">=1s<10K"] += 1
else:
data['all'][">=1s>=10K"] += nb_bytes_s2d + nb_bytes_d2s
count['all'][">=1s>=10K"] += 1
if duration >= 100.0:
count['all']['>=100s'] += 1
data['all']['>=100s'] += nb_bytes_s2d + nb_bytes_d2s
elif duration < 10.0:
count['all']['<10s'] += 1
data['all']['<10s'] += nb_bytes_s2d + nb_bytes_d2s
if nb_bytes_s2d + nb_bytes_d2s == 2:
count['all']['2B'] += 1
data['all']['2B'] += nb_bytes_s2d + nb_bytes_d2s
elif nb_bytes_s2d + nb_bytes_d2s == 1:
count['all']['1B'] += 1
data['all']['1B'] += nb_bytes_s2d + nb_bytes_d2s
if nb_bytes_s2d + nb_bytes_d2s < 1000:
count['all']['<1K'] += 1
data['all']['<1K'] += nb_bytes_s2d + nb_bytes_d2s
if nb_bytes_s2d + nb_bytes_d2s < 10000:
count['all']['<10K'] += 1
data['all']['<10K'] += nb_bytes_s2d + nb_bytes_d2s
elif nb_bytes_s2d + nb_bytes_d2s >= 1000000:
count['all']['>=1M'] += 1
data['all']['>=1M'] += nb_bytes_s2d + nb_bytes_d2s
if duration < 9:
data['all']["<9s"] += nb_bytes_s2d + nb_bytes_d2s
count['all']["<9s"] += 1
else:
if nb_bytes_s2d + nb_bytes_d2s < 10000:
data['all'][">=9s<10K"] += nb_bytes_s2d + nb_bytes_d2s
count['all'][">=9s<10K"] += 1
else:
data['all'][">=9s>=10K"] += nb_bytes_s2d + nb_bytes_d2s
count['all'][">=9s>=10K"] += 1
tot_count['all'] += 1
total_bytes['all'][co.S2D] += nb_bytes_s2d
total_bytes['all'][co.D2S] += nb_bytes_d2s
for cond, data_cond in data.iteritems():
print(cond + " with " + str(tot_count[cond]) + "connections:", file=log_file)
print("TOTAL BYTES S2D", total_bytes['all'][co.S2D], file=log_file)
print("TOTAL BYTES D2S", total_bytes['all'][co.D2S], file=log_file)
for dur_type, value in data_cond.iteritems():
print(dur_type + " (has " + str(count[cond][dur_type]) + " with " + str(count[cond][dur_type] * 100 / (tot_count[cond] + 0.00001)) + "%): " + str(value) + " bytes (" + str(value * 100 / (total_bytes[cond][co.S2D] + total_bytes[cond][co.D2S] + 0.00001)) + "%)", file=log_file)
def count_ip_type(log_file=sys.stdout):
results = {co.IPv4: [], co.IPv6: []}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
ip_type = flow.attr[co.TYPE]
if flow.attr[co.SADDR] not in results[ip_type]:
results[ip_type].append(flow.attr[co.SADDR])
print("IPv4", file=log_file)
print(results[co.IPv4], file=log_file)
print("IPv6", file=log_file)
print(results[co.IPv6], file=log_file)
print("IPv4", len(results[co.IPv4]), "IPv6", len(results[co.IPv6]), file=log_file)
def count_packet(log_file=sys.stdout):
count = {co.S2D: 0, co.D2S: 0}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
for direction in co.DIRECTIONS:
count[direction] += flow.attr[direction].get(co.PACKS, 0)
print("NB PACKETS S2D", count[co.S2D], "NB PACKETS D2S", count[co.D2S], file=log_file)
def count_ports(log_file=sys.stdout):
count = {co.SPORT: {}, co.DPORT: {}}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
for port in [co.SPORT, co.DPORT]:
if flow.attr[port] in count[port]:
count[port][flow.attr[port]] += 1
else:
count[port][flow.attr[port]] = 1
print("PORT SOURCE", file=log_file)
print(count[co.SPORT], file=log_file)
print("PORT DEST", file=log_file)
print(count[co.DPORT], file=log_file)
def count_ports_mptcp(log_file=sys.stdout):
count_mptcp = {}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
if conn.flows['0'].attr[co.DPORT] in count_mptcp:
count_mptcp[conn.flows['0'].attr[co.DPORT]] += 1
else:
count_mptcp[conn.flows['0'].attr[co.DPORT]] = 1
print("PORT DEST MPTCP", file=log_file)
print(count_mptcp, file=log_file)
def count_on_filtered(min_bytes=1000000, log_file=sys.stdout):
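    # Statistics restricted to MPTCP connections carrying at least min_bytes (as
    # reported by mptcptrace): number of connections, bytes and packets per direction,
    # and source/destination port occurrences.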
count_bytes = {co.S2D: 0, co.D2S: 0}
count_packs = {co.S2D: 0, co.D2S: 0}
count_conn = 0
ports = {co.SPORT: {}, co.DPORT: {}}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
mptcp_bytes = conn.attr[co.S2D].get(co.BYTES_MPTCPTRACE, 0) + conn.attr[co.D2S].get(co.BYTES_MPTCPTRACE, 0)
if mptcp_bytes >= min_bytes:
count_conn += 1
for direction in co.DIRECTIONS:
count_bytes[direction] += conn.attr[direction].get(co.BYTES_MPTCPTRACE, 0)
for flow_id, flow in conn.flows.iteritems():
for port in [co.SPORT, co.DPORT]:
if flow.attr[port] in ports[port]:
ports[port][flow.attr[port]] += 1
else:
ports[port][flow.attr[port]] = 1
for direction in co.DIRECTIONS:
count_packs[direction] += flow.attr[direction].get(co.PACKS, 0)
print("NB CONN FILTERED", count_conn, file=log_file)
print("BYTES S2D FILTERED", count_bytes[co.S2D], "BYTES D2S FILTERED", count_bytes[co.D2S], file=log_file)
print("PACKS S2D FILTERED", count_packs[co.S2D], "PACKS D2S FILTERED", count_packs[co.D2S], file=log_file)
print("PORT SOURCE FILTER", file=log_file)
print(ports[co.SPORT], file=log_file)
print("PORT DEST FILTER", file=log_file)
print(ports[co.DPORT], file=log_file)
def box_plot_cellular_percentage(log_file=sys.stdout, limit_duration=0, limit_bytes=0):
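    # For each MPTCP connection above the duration/bytes thresholds, compute the
    # fraction of bytes sent on cellular per direction (reinjected bytes subtracted),
    # then produce fog/scatter plots, histograms and boxplots, and log how many
    # connections use only Wi-Fi (fraction 0) or only cellular (fraction 1).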
base_graph_name_bytes = "summary_fraction_cellular"
base_graph_path_bytes = os.path.join(sums_dir_exp, base_graph_name_bytes)
fog_base_graph_name_bytes = "fog_cellular"
fog_base_graph_path_bytes = os.path.join(sums_dir_exp, fog_base_graph_name_bytes)
color = {'Connections': 'orange'}
data_bytes = {'all': {}}
data_frac = {'all': {}}
nb_zero = {'all': {}}
bytes_zero = {'all': {}}
nb_one = {'all': {}}
bytes_one = {'all': {}}
tot_conn = {'all': {}}
tot_bytes = {'all': {}}
for cond in data_frac:
data_frac[cond] = {co.S2D: {}, co.D2S: {}}
for cond in data_bytes:
data_bytes[cond] = {co.S2D: {}, co.D2S: {}}
for cond in nb_zero:
nb_zero[cond] = {co.S2D: {}, co.D2S: {}}
for cond in bytes_zero:
bytes_zero[cond] = {co.S2D: {}, co.D2S: {}}
for cond in nb_one:
nb_one[cond] = {co.S2D: {}, co.D2S: {}}
for cond in bytes_one:
bytes_one[cond] = {co.S2D: {}, co.D2S: {}}
for cond in tot_conn:
tot_conn[cond] = {co.S2D: {}, co.D2S: {}}
for cond in tot_bytes:
tot_bytes[cond] = {co.S2D: {}, co.D2S: {}}
for fname, data in connections.iteritems():
app = "Connections"
for conn_id, conn in data.iteritems():
if app not in data_frac['all'][co.S2D]:
for direction in data_frac['all'].keys():
data_frac['all'][direction][app] = []
data_bytes['all'][direction][app] = []
nb_zero['all'][direction][app] = 0
bytes_zero['all'][direction][app] = 0
nb_one['all'][direction][app] = 0
bytes_one['all'][direction][app] = 0
tot_conn['all'][direction][app] = 0
tot_bytes['all'][direction][app] = 0
            # Only interested in MPTCP connections
if isinstance(conn, mptcp.MPTCPConnection):
if conn.attr[co.DURATION] < limit_duration:
continue
conn_bytes_s2d = {'cellular': 0, 'wifi': 0, '?': 0}
conn_bytes_d2s = {'cellular': 0, 'wifi': 0, '?': 0}
if co.BYTES in conn.attr[co.S2D]:
for interface in conn.attr[co.S2D][co.BYTES]:
conn_bytes_s2d[interface] += conn.attr[co.S2D][co.BYTES][interface]
if co.BYTES in conn.attr[co.D2S]:
for interface in conn.attr[co.D2S][co.BYTES]:
conn_bytes_d2s[interface] += conn.attr[co.D2S][co.BYTES][interface]
for flow_id, flow in conn.flows.iteritems():
if co.S2D not in flow.attr or co.D2S not in flow.attr or co.REINJ_ORIG_BYTES not in flow.attr[co.S2D] or co.REINJ_ORIG_BYTES not in flow.attr[co.D2S]:
break
interface = flow.attr[co.IF]
conn_bytes_s2d[interface] -= flow.attr[co.S2D][co.REINJ_ORIG_BYTES]
conn_bytes_d2s[interface] -= flow.attr[co.D2S][co.REINJ_ORIG_BYTES]
if conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi'] > limit_bytes:
frac_cell_s2d = (max(0.0, min(1.0, (conn_bytes_s2d['cellular'] + 0.0) / (conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi']))))
if frac_cell_s2d == 0:
nb_zero['all'][co.S2D][app] += 1
bytes_zero['all'][co.S2D][app] += conn_bytes_s2d['wifi']
elif frac_cell_s2d == 1:
nb_one['all'][co.S2D][app] += 1
bytes_one['all'][co.S2D][app] += conn_bytes_s2d['cellular']
data_frac['all'][co.S2D][app].append(frac_cell_s2d)
data_bytes['all'][co.S2D][app].append(conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi'])
tot_conn['all'][co.S2D][app] += 1
tot_bytes['all'][co.S2D][app] += conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi']
if conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi'] > limit_bytes:
frac_cell_d2s = max(0.0, min(1.0, ((conn_bytes_d2s['cellular'] + 0.0) / (conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi']))))
if frac_cell_d2s == 0:
nb_zero['all'][co.D2S][app] += 1
bytes_zero['all'][co.D2S][app] += conn_bytes_d2s['wifi']
elif frac_cell_d2s == 1:
nb_one['all'][co.D2S][app] += 1
bytes_one['all'][co.D2S][app] += conn_bytes_d2s['cellular']
data_frac['all'][co.D2S][app].append(frac_cell_d2s)
data_bytes['all'][co.D2S][app].append(conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi'])
tot_conn['all'][co.D2S][app] += 1
tot_bytes['all'][co.D2S][app] += conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi']
data_scatter = {co.S2D: {}, co.D2S: {}}
for condition in data_bytes:
for direction in data_bytes[condition]:
nb_zeros = 0.
bytes_zeros = 0.
nb_ones = 0.
bytes_ones = 0.
total_conn = 0.
total_bytes = 0.
data_scatter[direction][condition] = {}
for app in data_bytes[condition][direction]:
data_scatter[direction][condition][app] = zip(data_bytes[condition][direction][app], data_frac[condition][direction][app])
print(condition, direction, app, "NB ZERO", nb_zero[condition][direction][app], "BYTES ZERO", bytes_zero[condition][direction][app], "NB ONE", nb_one[condition][direction][app], "BYTES ONE", bytes_one[condition][direction][app], file=log_file)
nb_zeros += nb_zero[condition][direction][app]
bytes_zeros += bytes_zero[condition][direction][app]
nb_ones += nb_one[condition][direction][app]
bytes_ones += bytes_one[condition][direction][app]
total_conn += tot_conn[condition][direction][app]
total_bytes += tot_bytes[condition][direction][app]
            if total_conn > 0 and total_bytes > 0:
print("TOTAL:", nb_zeros, "zero conns over", total_conn, nb_zeros / total_conn * 100., "%", bytes_zeros, "zero bytes over", total_bytes, bytes_zeros / total_bytes * 100., "%", nb_ones, "connections full cellular", nb_ones / total_conn * 100., "%", bytes_ones, "bytes cellular", bytes_ones / total_bytes * 100., "%", file=log_file)
co.scatter_plot_with_direction(data_scatter, "Bytes on connection", "Fraction of bytes on cellular", color, sums_dir_exp, fog_base_graph_path_bytes, plot_identity=False, log_scale_y=False, y_to_one=True, label_order=['Dailymotion', 'Drive', 'Dropbox', 'Facebook', 'Firefox', 'Messenger', 'Spotify', 'Youtube'])
for cond, data_cond in data_frac.iteritems():
for direction, data_dir in data_cond.iteritems():
plt.figure()
fig, ax = plt.subplots()
apps = data_dir.keys()
to_plot = []
for app in apps:
for point in data_frac[cond][direction][app]:
to_plot.append(point)
if to_plot:
plt.hist(to_plot, bins=50)
plt.xlabel("Fraction of bytes on cellular", fontsize=18)
plt.ylabel("Number of connections", fontsize=18)
plt.xlim([0.0, 1.0])
plt.savefig(base_graph_path_bytes + "_hist_" + cond + "_" + direction + ".pdf")
plt.close()
for cond, data_cond in data_frac.iteritems():
for direction, data_dir in data_cond.iteritems():
plt.figure()
fig, ax = plt.subplots()
apps = data_dir.keys()
to_plot = []
for app in apps:
to_plot.append(data_frac[cond][direction][app])
if to_plot:
plt.boxplot(to_plot)
plt.xticks(range(1, len(apps) + 1), apps)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.ylabel("Fraction of bytes on cellular", fontsize=18)
plt.ylim([0.0, 1.0])
plt.savefig(base_graph_path_bytes + "_" + cond + "_" + direction + ".pdf")
plt.close()
def cdf_bytes_all(log_file=sys.stdout):
base_graph_name_bytes = "cdf_bytes_all"
base_graph_path_bytes = os.path.join(sums_dir_exp, base_graph_name_bytes)
tot_bytes = {'all': {'bytes': []}}
data_frac = {'all': {}}
for cond in data_frac:
data_frac[cond] = {co.S2D: {}, co.D2S: {}}
for fname, data in connections.iteritems():
app = 'all'
for conn_id, conn in data.iteritems():
if app not in data_frac['all'][co.S2D]:
for direction in data_frac['all'].keys():
data_frac['all'][direction][app] = []
            # Only interested in MPTCP connections
            if isinstance(conn, mptcp.MPTCPConnection):
conn_bytes_s2d = {'cellular': 0, 'wifi': 0, '?': 0}
conn_bytes_d2s = {'cellular': 0, 'wifi': 0, '?': 0}
if co.BYTES in conn.attr[co.S2D]:
for interface in conn.attr[co.S2D][co.BYTES]:
conn_bytes_s2d[interface] += conn.attr[co.S2D][co.BYTES][interface]
if co.BYTES in conn.attr[co.D2S]:
for interface in conn.attr[co.D2S][co.BYTES]:
conn_bytes_d2s[interface] += conn.attr[co.D2S][co.BYTES][interface]
for flow_id, flow in conn.flows.iteritems():
                    if co.S2D not in flow.attr or co.D2S not in flow.attr or co.REINJ_ORIG_BYTES not in flow.attr[co.S2D] or co.REINJ_ORIG_BYTES not in flow.attr[co.D2S]:
break
interface = flow.attr[co.IF]
conn_bytes_s2d[interface] -= flow.attr[co.S2D][co.REINJ_ORIG_BYTES]
conn_bytes_d2s[interface] -= flow.attr[co.D2S][co.REINJ_ORIG_BYTES]
tot_bytes['all']['bytes'].append(conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi'])
co.plot_cdfs_natural(tot_bytes, ['r'], "Bytes", base_graph_path_bytes)
def cdf_rtt_s2d_all(log_file=sys.stdout, min_samples=5, min_bytes=100):
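    # CDF of the average S2D RTT of each (sub)flow, grouped by interface, keeping
    # only flows with at least min_samples RTT samples and min_bytes bytes.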
wifi = "wifi"
cell = "cellular"
aggl_res = {'all': {wifi: [], cell: [], '?': []}}
graph_fname = "rtt_avg_s2d_all.pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
if co.S2D not in flow.attr or co.RTT_SAMPLES not in flow.attr[co.S2D]:
break
if flow.attr[co.S2D][co.RTT_SAMPLES] >= min_samples and flow.attr[co.S2D][co.BYTES] >= min_bytes:
aggl_res['all'][flow.attr[co.IF]] += [(flow.attr[co.S2D][co.RTT_AVG], fname)]
elif isinstance(conn, tcp.TCPConnection):
if co.S2D not in conn.flow.attr or co.RTT_SAMPLES not in conn.flow.attr[co.S2D]:
                    continue
if conn.flow.attr[co.S2D][co.RTT_SAMPLES] >= min_samples and conn.flow.attr[co.S2D][co.BYTES] >= min_bytes:
aggl_res['all'][conn.flow.attr[co.IF]] += [(conn.flow.attr[co.S2D][co.RTT_AVG], fname)]
aggl_res['all'].pop('?', None)
co.log_outliers(aggl_res, remove=args.remove)
co.plot_cdfs_natural(aggl_res, ['red', 'blue', 'green', 'black'], 'RTT (ms)', graph_full_path)
co.plot_cdfs_natural(aggl_res, ['red', 'blue', 'green', 'black'], 'RTT (ms)', os.path.splitext(graph_full_path)[0] + '_cut.pdf', xlim=1000)
def cdf_rtt_d2s_all(log_file=sys.stdout, min_samples=5):
wifi = "wifi"
cell = "cellular"
aggl_res = {'all': {wifi: [], cell: [], '?': []}}
graph_fname = "rtt_avg_d2s_all.pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
if co.D2S not in flow.attr or co.RTT_SAMPLES not in flow.attr[co.D2S]:
break
if flow.attr[co.D2S][co.RTT_SAMPLES] >= min_samples:
aggl_res['all'][flow.attr[co.IF]] += [(flow.attr[co.D2S][co.RTT_AVG], fname)]
elif isinstance(conn, tcp.TCPConnection):
if co.D2S not in conn.flow.attr or co.RTT_SAMPLES not in conn.flow.attr[co.D2S]:
                    continue
if conn.flow.attr[co.D2S][co.RTT_SAMPLES] >= min_samples:
aggl_res['all'][conn.flow.attr[co.IF]] += [(conn.flow.attr[co.D2S][co.RTT_AVG], fname)]
aggl_res['all'].pop('?', None)
co.log_outliers(aggl_res, remove=args.remove)
co.plot_cdfs_natural(aggl_res, ['red', 'blue', 'green', 'black'], 'RTT (ms)', os.path.splitext(graph_full_path)[0] + '.pdf')
co.plot_cdfs_natural(aggl_res, ['red', 'blue', 'green', 'black'], 'RTT (ms)', os.path.splitext(graph_full_path)[0] + '_cut.pdf', xlim=1000)
def difference_rtt_d2s(log_file=sys.stdout, min_bytes=1000000):
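    # For MPTCP connections with exactly two subflows and at least min_bytes in D2S,
    # plot the CDF of (average D2S RTT of the initial subflow minus that of the second).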
# Computed only on MPTCP connections with 2 subflows and at least 1MB
results = {'two_sf': {'diff': []}}
graph_fname = "rtt_avg_diff_2sf.pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
if len(conn.flows) == 2:
if conn.attr[co.D2S].get(co.BYTES_MPTCPTRACE, 0) >= min_bytes:
is_ok = True
for flow_id, flow in conn.flows.iteritems():
if co.START not in flow.attr:
is_ok = False
if not is_ok:
continue
time_init_sf = float('inf')
rtt_init_sf = -1.0
rtt_second_sf = -1.0
for flow_id, flow in conn.flows.iteritems():
if flow.attr[co.START] < time_init_sf:
time_init_sf = flow.attr[co.START]
rtt_second_sf = rtt_init_sf
rtt_init_sf = flow.attr[co.D2S][co.RTT_AVG]
else:
rtt_second_sf = flow.attr[co.D2S][co.RTT_AVG]
results['two_sf']['diff'].append(rtt_init_sf - rtt_second_sf)
co.plot_cdfs_natural(results, ['red', 'blue', 'green', 'black'], 'Initial SF AVG RTT - Second SF AVG RTT', os.path.splitext(graph_full_path)[0] + '.pdf')
def reinject_plot(log_file=sys.stdout, min_bytes=0.0):
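    # For multiflow connections, compute the fraction of bytes and packets that are
    # reinjection originals in each direction and draw boxplots of both fractions.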
base_graph_fname = "reinject_bytes"
base_graph_full_path = os.path.join(sums_dir_exp, base_graph_fname)
results = {co.S2D: {'all': {'all': []}}, co.D2S: {'all': {'all': []}}}
results_packs = {co.S2D: {'all': {'all': []}}, co.D2S: {'all': {'all': []}}}
for fname, data in multiflow_connections.iteritems():
for conn_id, conn in data.iteritems():
reinject_bytes_s2d = 0.0
reinject_bytes_d2s = 0.0
reinject_packs_s2d = 0.0
reinject_packs_d2s = 0.0
bytes_s2d = 0.0
bytes_d2s = 0.0
packs_s2d = 0.0
packs_d2s = 0.0
for flow_id, flow in conn.flows.iteritems():
if co.S2D in flow.attr and co.D2S in flow.attr:
if co.REINJ_ORIG_BYTES in flow.attr[co.S2D] and co.REINJ_ORIG_BYTES in flow.attr[co.D2S]:
if co.BYTES in flow.attr[co.S2D]:
bytes_s2d += flow.attr[co.S2D][co.BYTES]
else:
continue
if co.BYTES in flow.attr[co.D2S]:
bytes_d2s += flow.attr[co.D2S][co.BYTES]
else:
continue
reinject_bytes_s2d += flow.attr[co.S2D][co.REINJ_ORIG_BYTES]
reinject_bytes_d2s += flow.attr[co.D2S][co.REINJ_ORIG_BYTES]
reinject_packs_s2d += flow.attr[co.S2D][co.REINJ_ORIG_PACKS]
reinject_packs_d2s += flow.attr[co.D2S][co.REINJ_ORIG_PACKS]
packs_s2d += flow.attr[co.S2D][co.PACKS]
packs_d2s += flow.attr[co.D2S][co.PACKS]
if bytes_s2d > min_bytes and packs_s2d > 0:
results[co.S2D]['all']['all'].append(reinject_bytes_s2d / bytes_s2d)
results_packs[co.S2D]['all']['all'].append(reinject_packs_s2d / packs_s2d)
if bytes_d2s > min_bytes and packs_d2s > 0:
if (reinject_bytes_d2s / bytes_d2s) >= 0.5:
print("reinj: " + str(reinject_bytes_d2s) + " tot: " + str(bytes_d2s) + " " + fname + " " + conn_id)
results[co.D2S]['all']['all'].append(reinject_bytes_d2s / bytes_d2s)
results_packs[co.D2S]['all']['all'].append(reinject_packs_d2s / packs_d2s)
for direction in results:
for condition in results[direction]:
plt.figure()
fig, ax = plt.subplots()
apps = results[direction][condition].keys()
to_plot = []
for app in apps:
to_plot.append(results[direction][condition][app])
if to_plot:
plt.boxplot(to_plot)
plt.xticks(range(1, len(apps) + 1), apps)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.ylabel("Fraction of bytes reinjected", fontsize=18)
plt.savefig(base_graph_full_path + "_" + condition + "_" + direction + ".pdf")
plt.close()
packs_base_graph_fname = "reinject_packs"
packs_base_graph_full_path = os.path.join(sums_dir_exp, packs_base_graph_fname)
plt.figure()
fig, ax = plt.subplots()
apps = results_packs[direction][condition].keys()
to_plot = []
for app in apps:
to_plot.append(results_packs[direction][condition][app])
if to_plot:
plt.boxplot(to_plot)
plt.xticks(range(1, len(apps) + 1), apps)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.ylabel("Fraction of packs reinjected", fontsize=18)
plt.savefig(packs_base_graph_full_path + "_" + condition + "_" + direction + ".pdf")
plt.close()
def retrans_plot(log_file=sys.stdout, min_bytes=0.0):
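    # Same idea as reinject_plot but for single-flow connections: fraction of bytes
    # and packets retransmitted in each direction, shown as boxplots.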
base_graph_fname = "retrans_bytes"
base_graph_full_path = os.path.join(sums_dir_exp, base_graph_fname)
results = {co.S2D: {'all': {'all': []}}, co.D2S: {'all': {'all': []}}}
results_packs = {co.S2D: {'all': {'all': []}}, co.D2S: {'all': {'all': []}}}
for fname, data in singleflow_connections.iteritems():
for conn_id, conn in data.iteritems():
bytes_retrans_s2d = 0.0
bytes_retrans_d2s = 0.0
packs_retrans_s2d = 0.0
packs_retrans_d2s = 0.0
bytes_s2d = 0.0
bytes_d2s = 0.0
packs_s2d = 0.0
packs_d2s = 0.0
for flow_id, flow in conn.flows.iteritems():
if co.S2D in flow.attr and co.D2S in flow.attr:
if co.BYTES_RETRANS in flow.attr[co.S2D] and co.BYTES_RETRANS in flow.attr[co.D2S]:
if co.BYTES in flow.attr[co.S2D]:
bytes_s2d += flow.attr[co.S2D][co.BYTES]
else:
continue
if co.BYTES in flow.attr[co.D2S]:
bytes_d2s += flow.attr[co.D2S][co.BYTES]
else:
continue
bytes_retrans_d2s += flow.attr[co.D2S][co.BYTES_RETRANS]
bytes_retrans_s2d += flow.attr[co.S2D][co.BYTES_RETRANS]
packs_retrans_s2d += flow.attr[co.S2D][co.PACKS_RETRANS]
packs_retrans_d2s += flow.attr[co.D2S][co.PACKS_RETRANS]
packs_s2d += flow.attr[co.S2D][co.PACKS]
packs_d2s += flow.attr[co.D2S][co.PACKS]
if bytes_s2d > min_bytes and packs_s2d > 0:
results[co.S2D]['all']['all'].append(bytes_retrans_s2d / bytes_s2d)
                results_packs[co.S2D]['all']['all'].append(packs_retrans_s2d / packs_s2d)
if bytes_d2s > min_bytes and packs_d2s > 0:
if (bytes_retrans_d2s / bytes_d2s) >= 0.5:
print("retrans: " + str(bytes_retrans_d2s) + " tot: " + str(bytes_d2s) + " " + fname + " " + conn_id)
results[co.D2S]['all']['all'].append(bytes_retrans_d2s / bytes_d2s)
                results_packs[co.D2S]['all']['all'].append(packs_retrans_d2s / packs_d2s)
for direction in results:
for condition in results[direction]:
plt.figure()
fig, ax = plt.subplots()
apps = results[direction][condition].keys()
to_plot = []
for app in apps:
to_plot.append(results[direction][condition][app])
if to_plot:
plt.boxplot(to_plot)
plt.xticks(range(1, len(apps) + 1), apps)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.ylabel("Fraction of bytes retransmitted", fontsize=18)
plt.savefig(base_graph_full_path + "_" + condition + "_" + direction + ".pdf")
plt.close()
packs_base_graph_fname = "retrans_packs"
packs_base_graph_full_path = os.path.join(sums_dir_exp, packs_base_graph_fname)
plt.figure()
fig, ax = plt.subplots()
apps = results_packs[direction][condition].keys()
to_plot = []
for app in apps:
to_plot.append(results_packs[direction][condition][app])
if to_plot:
plt.boxplot(to_plot)
plt.xticks(range(1, len(apps) + 1), apps)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.ylabel("Fraction of packs retransmitted", fontsize=18)
plt.savefig(packs_base_graph_full_path + "_" + condition + "_" + direction + ".pdf")
plt.close()
def reinject_plot_relative_to_data(log_file=sys.stdout, min_bytes=0.0):
base_graph_fname = "reinject_data_bytes"
base_graph_full_path = os.path.join(sums_dir_exp, base_graph_fname)
results = {co.S2D: {'all': {'all': []}}, co.D2S: {'all': {'all': []}}}
for fname, data in multiflow_connections.iteritems():
for conn_id, conn in data.iteritems():
if co.S2D not in conn.attr or co.D2S not in conn.attr:
continue
reinject_bytes_s2d = 0.0
reinject_bytes_d2s = 0.0
bytes_s2d = 0.0
bytes_d2s = 0.0
if co.BYTES_MPTCPTRACE in conn.attr[co.S2D]:
bytes_s2d = conn.attr[co.S2D][co.BYTES_MPTCPTRACE]
if co.BYTES_MPTCPTRACE in conn.attr[co.D2S]:
bytes_d2s = conn.attr[co.D2S][co.BYTES_MPTCPTRACE]
# reinject_bytes_s2d = 0
# reinject_bytes_d2s = 0
# reinject_packs_s2d = 0
# reinject_packs_d2s = 0
for flow_id, flow in conn.flows.iteritems():
if co.S2D in flow.attr and co.D2S in flow.attr:
if co.REINJ_ORIG_BYTES in flow.attr[co.S2D] and co.REINJ_ORIG_BYTES in flow.attr[co.D2S]:
reinject_bytes_s2d += flow.attr[co.S2D][co.REINJ_ORIG_BYTES]
                        reinject_bytes_d2s += flow.attr[co.D2S][co.REINJ_ORIG_BYTES]
if bytes_s2d > min_bytes:
results[co.S2D]['all']['all'].append(reinject_bytes_s2d / bytes_s2d)
if bytes_d2s > min_bytes:
if (reinject_bytes_d2s / bytes_d2s) >= 0.5:
print("reinj: " + str(reinject_bytes_d2s) + " tot: " + str(bytes_d2s) + " " + fname + " " + conn_id)
results[co.D2S]['all']['all'].append(reinject_bytes_d2s / bytes_d2s)
for direction in results:
for condition in results[direction]:
plt.figure()
fig, ax = plt.subplots()
apps = results[direction][condition].keys()
to_plot = []
for app in apps:
to_plot.append(results[direction][condition][app])
if to_plot:
plt.boxplot(to_plot)
plt.xticks(range(1, len(apps) + 1), apps)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.ylabel("Fraction of bytes reinjected from all data bytes", fontsize=18)
plt.savefig(base_graph_full_path + "_" + condition + "_" + direction + ".pdf")
plt.close()
def fog_plot_cellular_percentage_rtt_wifi(log_file=sys.stdout, limit_duration=0, limit_bytes=0):
fog_base_graph_name_bytes = "fog_cellular_rtt_wifi"
fog_base_graph_path_bytes = os.path.join(sums_dir_exp, fog_base_graph_name_bytes)
color = {'Connections': 'orange'}
data_rtt = {'all': {}}
data_frac = {'all': {}}
for cond in data_frac:
data_frac[cond] = {co.S2D: {}, co.D2S: {}}
for cond in data_rtt:
data_rtt[cond] = {co.S2D: {}, co.D2S: {}}
for fname, data in connections.iteritems():
app = 'Connections'
for conn_id, conn in data.iteritems():
if app not in data_frac['all'][co.S2D]:
for direction in data_frac['all']:
data_frac['all'][direction][app] = []
data_rtt['all'][direction][app] = []
            # Only interested in MPTCP connections
if isinstance(conn, mptcp.MPTCPConnection):
if conn.attr[co.DURATION] < limit_duration:
continue
conn_bytes_s2d = {'cellular': 0, 'wifi': 0, '?': 0}
conn_bytes_d2s = {'cellular': 0, 'wifi': 0, '?': 0}
rtt_max_wifi_s2d = None
rtt_max_wifi_d2s = None
if co.BYTES in conn.attr[co.S2D]:
for interface in conn.attr[co.S2D][co.BYTES]:
conn_bytes_s2d[interface] += conn.attr[co.S2D][co.BYTES][interface]
if co.BYTES in conn.attr[co.D2S]:
for interface in conn.attr[co.D2S][co.BYTES]:
conn_bytes_d2s[interface] += conn.attr[co.D2S][co.BYTES][interface]
for flow_id, flow in conn.flows.iteritems():
if co.S2D not in flow.attr or co.D2S not in flow.attr:
continue
if co.REINJ_ORIG_BYTES not in flow.attr[co.S2D] or co.REINJ_ORIG_BYTES not in flow.attr[co.D2S]:
break
interface = flow.attr[co.IF]
conn_bytes_s2d[interface] -= flow.attr[co.S2D][co.REINJ_ORIG_BYTES]
conn_bytes_d2s[interface] -= flow.attr[co.D2S][co.REINJ_ORIG_BYTES]
if interface == co.WIFI:
if co.RTT_MAX in flow.attr[co.S2D]:
rtt_max_wifi_s2d = flow.attr[co.S2D][co.RTT_MAX]
if co.RTT_MAX in flow.attr[co.D2S]:
rtt_max_wifi_d2s = flow.attr[co.D2S][co.RTT_MAX]
if conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi'] > limit_bytes:
# if (conn_bytes_s2d['cellular'] + 0.0) / (conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi']) > 0.6:
# print("S2D: " + str((conn_bytes_s2d['cellular'] + 0.0) / (conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi'])) + " " + str(conn_bytes_s2d['cellular']) + " " + str(conn_bytes_s2d['wifi']) + " " + fname + " " + conn_id + " " + str(conn.attr[co.DURATION]) + " " + conn.flows['0'].attr[co.IF] + " " + str(conn.flows['0'].attr[co.S2D][co.RTT_STDEV]) + " " + conn.flows['1'].attr[co.IF] + " " + str(conn.flows['1'].attr[co.S2D][co.RTT_STDEV]))
frac_cell_s2d = (min(1.0, (conn_bytes_s2d['cellular'] + 0.0) / (conn_bytes_s2d['cellular'] + conn_bytes_s2d['wifi'])))
data_frac['all'][co.S2D][app].append(frac_cell_s2d)
data_rtt['all'][co.S2D][app].append(rtt_max_wifi_s2d)
if conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi'] > limit_bytes:
# if (conn_bytes_d2s['cellular'] + 0.0) / (conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi']) > 0.6:
# print("D2S: " + str((conn_bytes_d2s['cellular'] + 0.0) / (conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi'])) + " " + str(conn_bytes_d2s['cellular']) + " " + str(conn_bytes_d2s['wifi']) + " " + fname + " " + conn_id + " " + str(conn.attr[co.DURATION]) + " " + conn.flows['0'].attr[co.IF] + " " + str(conn.flows['0'].attr[co.D2S][co.RTT_STDEV]) + " " + conn.flows['1'].attr[co.IF] + " " + str(conn.flows['1'].attr[co.D2S][co.RTT_STDEV]))
frac_cell_d2s = min(1.0, ((conn_bytes_d2s['cellular'] + 0.0) / (conn_bytes_d2s['cellular'] + conn_bytes_d2s['wifi'])))
data_frac['all'][co.D2S][app].append(frac_cell_d2s)
data_rtt['all'][co.D2S][app].append(rtt_max_wifi_d2s)
data_scatter = {co.S2D: {}, co.D2S: {}}
for condition in data_rtt:
for direction in data_rtt[condition]:
data_scatter[direction][condition] = {}
for app in data_rtt[condition][direction]:
data_scatter[direction][condition][app] = zip(data_rtt[condition][direction][app], data_frac[condition][direction][app])
co.scatter_plot_with_direction(data_scatter, "Max RTT on Wi-Fi (ms)", "Fraction of bytes on cellular", color, sums_dir_exp, fog_base_graph_path_bytes, plot_identity=False, log_scale_y=False, log_scale_x=False)
def check_ok(value):
if value < 0 or value >= 100000000:
return 0
return value
def textual_summary_global(log_file=sys.stdout):
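    # Global totals: number of connections and traces, and bytes per interface and
    # direction (reinjected bytes subtracted for MPTCP subflows, values sanity-checked
    # with check_ok); prints the cellular ratio per direction and the S2D/D2S split.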
conn_number = {'all': 0}
tests_number = {'all': 0}
bytes_number = {'all': {co.S2D: {co.CELL: 0, co.WIFI: 0, '?': 0}, co.D2S: {co.CELL: 0, co.WIFI: 0, '?': 0}}}
for fname, data in connections.iteritems():
conn_number['all'] += len(data)
tests_number['all'] += 1
for conn_id, conn in data.iteritems():
if isinstance(conn, tcp.TCPConnection):
ith = conn.flow.attr[co.IF]
if ith not in bytes_number['all'][co.S2D]:
continue
for direction in co.DIRECTIONS:
bytes_number['all'][direction][ith] += check_ok(conn.flow.attr[direction][co.BYTES])
elif isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
ith = flow.attr[co.IF]
for direction in co.DIRECTIONS:
reinjected = 0
if direction in flow.attr:
if co.REINJ_ORIG in flow.attr[direction]:
for start_seq, stop_seq in flow.attr[direction][co.REINJ_ORIG]:
reinjected += stop_seq - start_seq
if co.BYTES in flow.attr[direction]:
bytes_number['all'][direction][ith] += check_ok(flow.attr[direction][co.BYTES] - reinjected)
total = 0
total_tests = 0
total_s2d = 0
total_d2s = 0
for cond, cond_num in conn_number.iteritems():
print(cond + ": ", cond_num, " connections with ", tests_number[cond], " tests;", file=log_file)
total += cond_num
total_tests += tests_number[cond]
for direction in co.DIRECTIONS:
ratio = (bytes_number[cond][direction][co.CELL] + 0.0) / (bytes_number[cond][direction][co.WIFI] + bytes_number[cond][direction][co.CELL]) * 100 if bytes_number[cond][direction][co.WIFI] + bytes_number[cond][direction][co.CELL] > 0 else 0
print(direction, bytes_number[cond][direction][co.CELL], " bytes cell and ", bytes_number[cond][direction][co.WIFI], "bytes wifi (", ratio, "% cell)", file=log_file)
for ith in [co.WIFI, co.CELL, '?']:
total_s2d += bytes_number[cond][co.S2D][ith]
total_d2s += bytes_number[cond][co.D2S][ith]
total_ratio = (total_s2d + 0.0) / (total_s2d + total_d2s) * 100 if total_s2d + total_d2s > 0 else 0
print("Total: " + str(total) + " connections with " + str(total_tests) + " tests; " + str(total_s2d) + " bytes S2D and " + str(total_d2s) + " D2S (" + str(total_ratio) + " % s2d)", file=log_file)
def cdf_overhead_retrans_reinj(log_file=sys.stdout):
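    # For multiflow connections, CDFs of the fraction of data bytes retransmitted and
    # reinjected per direction, counts of connections above given thresholds
    # (reinjection >= 20%, retransmission >= 50%), and per-connection line graphs of
    # total versus reinjected bytes.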
results = {co.S2D: {'all': {'Reinjection': [], 'Retransmission': []}}, co.D2S: {'all': {'Reinjection': [], 'Retransmission': []}}}
results_two = {co.S2D: {'all': []}, co.D2S: {'all': []}}
graph_fname = "overhead_retrans_reinj_multiflow.pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
count_conn = {co.S2D: 0, co.D2S: 0}
count_reinj = {co.S2D: 0, co.D2S: 0}
count_reinj_20 = {co.S2D: 0, co.D2S: 0}
count_retrans_50 = {co.S2D: 0, co.D2S: 0}
count_retrans = {co.S2D: 0, co.D2S: 0}
for fname, data in multiflow_connections.iteritems():
for conn_id, conn in data.iteritems():
retrans_bytes = {co.S2D: 0, co.D2S: 0}
reinj_bytes = {co.S2D: 0, co.D2S: 0}
total_bytes = {co.S2D: 0, co.D2S: 0}
total_data_bytes = {co.S2D: 0, co.D2S: 0}
reinj_data_bytes = {co.S2D: 0, co.D2S: 0}
for flow_id, flow in conn.flows.iteritems():
for direction in co.DIRECTIONS:
if direction not in flow.attr:
continue
if co.BYTES in flow.attr[direction]:
# total_bytes[direction] += flow.attr[direction][co.BYTES_FRAMES_TOTAL]
total_bytes[direction] = total_bytes[direction] + flow.attr[direction][co.BYTES]
# retrans_bytes[direction] += flow.attr[direction].get(co.BYTES_FRAMES_RETRANS, 0)
retrans_bytes[direction] = retrans_bytes[direction] + flow.attr[direction].get(co.BYTES_RETRANS, 0)
# reinj_bytes[direction] += flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0) + (flow.attr[direction].get(co.REINJ_ORIG_PACKS, 0) * co.FRAME_MPTCP_OVERHEAD)
reinj_bytes[direction] = reinj_bytes[direction] + flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0)
total_data_bytes[direction] = total_data_bytes[direction] + flow.attr[direction].get(co.BYTES, 0)
reinj_data_bytes[direction] = reinj_data_bytes[direction] + flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0)
for direction in co.DIRECTIONS:
if total_bytes[direction] > 0:
count_conn[direction] += 1
results[direction]['all']['Retransmission'].append((retrans_bytes[direction] + 0.0) / total_data_bytes[direction])
if (retrans_bytes[direction] + 0.0) / total_data_bytes[direction] > 0.0:
count_retrans[direction] += 1
if (retrans_bytes[direction] + 0.0) / total_data_bytes[direction] >= 0.5:
count_retrans_50[direction] += 1
results[direction]['all']['Reinjection'].append((reinj_data_bytes[direction] + 0.0) / total_data_bytes[direction])
if (reinj_data_bytes[direction] + 0.0) / total_data_bytes[direction] > 0.0:
count_reinj[direction] += 1
if (reinj_data_bytes[direction] + 0.0) / total_data_bytes[direction] >= 0.2:
count_reinj_20[direction] += 1
results_two[direction]['all'].append([total_data_bytes[direction], reinj_data_bytes[direction]])
for direction in co.DIRECTIONS:
print("COUNT FOR DIRECTION", direction, file=log_file)
print("TOTAL", count_conn[direction], file=log_file)
print("REINJ", count_reinj[direction], file=log_file)
print("REINJ 20", count_reinj_20[direction], file=log_file)
print("RETRA", count_retrans[direction], file=log_file)
print("RETRA 50", count_retrans_50[direction], file=log_file)
co.plot_cdfs_with_direction(results, ['red', 'blue'], 'Fraction of total bytes', graph_full_path, natural=True, ylim=0.8)
co.plot_cdfs_with_direction(results, ['red', 'blue'], 'Fraction of total bytes', os.path.splitext(graph_full_path)[0] + '_cut.pdf', natural=True, ylim=0.8, xlim=1)
for direction in results_two:
for condition in results_two[direction]:
sorted_data = sorted(results_two[direction][condition], key=lambda elem: elem[0])
to_plot = [[], []]
i = 0
for point in sorted_data:
to_plot[0].append([i, point[0]])
to_plot[1].append([i, point[1]])
i += 1
tot_graph_full_path = os.path.splitext(graph_full_path)[0] + "_details_" + direction + "_" + condition + ".pdf"
co.plot_line_graph(to_plot, ['Total', 'Reinjections'], ['b', 'r'], 'Connections', 'Number of data bytes', '', tot_graph_full_path, y_log=True)
def list_bytes_all(log_file=sys.stdout):
graph_fname = "list_bytes.pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
results_two = {'both': {'all': []}}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
total_data_bytes = 0
for direction in co.DIRECTIONS:
if direction not in conn.attr:
continue
if conn.attr[direction].get(co.BYTES_MPTCPTRACE, 0) > 0:
total_data_bytes += conn.attr[direction].get(co.BYTES_MPTCPTRACE, 0)
results_two['both']['all'].append(total_data_bytes)
for direction in results_two:
for condition in results_two[direction]:
sorted_data = sorted(results_two[direction][condition])
to_plot = [[]]
i = 0
for point in sorted_data:
to_plot[0].append([i, point])
i += 1
tot_graph_full_path = os.path.splitext(graph_full_path)[0] + "_" + direction + "_" + condition + ".pdf"
co.plot_line_graph(to_plot, ['Total'], ['b'], 'Connections', 'Number of data bytes', '', tot_graph_full_path, y_log=True)
def cdf_overhead_retrans_reinj_singleflow(log_file=sys.stdout):
results = {co.S2D: {'all': {'Reinjection': [], 'Retransmission': []}}, co.D2S: {'all': {'Reinjection': [], 'Retransmission': []}}}
results_two = {co.S2D: {'all': []}, co.D2S: {'all': []}}
graph_fname = "overhead_retrans_reinj_singleflow.pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
for fname, data in singleflow_connections.iteritems():
for conn_id, conn in data.iteritems():
retrans_bytes = {co.S2D: 0, co.D2S: 0}
reinj_bytes = {co.S2D: 0, co.D2S: 0}
total_bytes = {co.S2D: 0, co.D2S: 0}
total_data_bytes = {co.S2D: 0, co.D2S: 0}
reinj_data_bytes = {co.S2D: 0, co.D2S: 0}
for flow_id, flow in conn.flows.iteritems():
for direction in co.DIRECTIONS:
if direction not in flow.attr:
continue
if co.BYTES in flow.attr[direction]:
# total_bytes[direction] += flow.attr[direction][co.BYTES_FRAMES_TOTAL]
total_bytes[direction] = total_bytes[direction] + flow.attr[direction][co.BYTES]
# retrans_bytes[direction] += flow.attr[direction].get(co.BYTES_FRAMES_RETRANS, 0)
retrans_bytes[direction] = retrans_bytes[direction] + flow.attr[direction].get(co.BYTES_RETRANS, 0)
# reinj_bytes[direction] += flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0) + (flow.attr[direction].get(co.REINJ_ORIG_PACKS, 0) * co.FRAME_MPTCP_OVERHEAD)
reinj_bytes[direction] = reinj_bytes[direction] + flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0)
total_data_bytes[direction] = total_data_bytes[direction] + flow.attr[direction].get(co.BYTES, 0)
reinj_data_bytes[direction] = reinj_data_bytes[direction] + flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0)
for direction in co.DIRECTIONS:
if total_bytes[direction] > 0:
results[direction]['all']['Retransmission'].append((retrans_bytes[direction] + 0.0) / total_data_bytes[direction])
results[direction]['all']['Reinjection'].append((reinj_data_bytes[direction] + 0.0) / total_data_bytes[direction])
results_two[direction]['all'].append([total_data_bytes[direction], retrans_bytes[direction]])
co.plot_cdfs_with_direction(results, ['red', 'blue'], 'Fraction of total bytes', graph_full_path, natural=True, ylim=0.8)
co.plot_cdfs_with_direction(results, ['red', 'blue'], 'Fraction of total bytes', os.path.splitext(graph_full_path)[0] + '_cut.pdf', natural=True, ylim=0.8, xlim=1)
for direction in results_two:
for condition in results_two[direction]:
sorted_data = sorted(results_two[direction][condition], key=lambda elem: elem[0])
to_plot = [[], []]
i = 0
for point in sorted_data:
to_plot[0].append([i, point[0]])
to_plot[1].append([i, point[1]])
i += 1
tot_graph_full_path = os.path.splitext(graph_full_path)[0] + "_details_" + direction + "_" + condition + ".pdf"
co.plot_line_graph(to_plot, ['Total', 'Retransmissions'], ['b', 'r'], 'Connections', 'Number of data bytes', '', tot_graph_full_path, y_log=True)
def plot_total_bytes_reinj_bytes(log_file=sys.stdout):
results = {co.S2D: {'all': [[], []]}, co.D2S: {'all': [[], []]}}
results_raw = {co.S2D: {'all': []}, co.D2S: {'all': []}}
graph_fname = "total_bytes_reinj_bytes"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
if isinstance(conn, mptcp.MPTCPConnection):
reinj_bytes = {co.S2D: 0, co.D2S: 0}
total_bytes = {co.S2D: 0, co.D2S: 0}
for flow_id, flow in conn.flows.iteritems():
for direction in co.DIRECTIONS:
if direction not in flow.attr:
continue
if co.BYTES in flow.attr[direction]:
total_bytes[direction] += flow.attr[direction][co.BYTES]
reinj_bytes[direction] += flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0)
for direction in co.DIRECTIONS:
results_raw[direction]['all'].append([total_bytes[direction], reinj_bytes[direction]])
for direction in results_raw:
for condition in results_raw[direction]:
results_raw[direction][condition] = sorted(results_raw[direction][condition], key=lambda elem: elem[0])
i = 0
for point in results_raw[direction][condition]:
results[direction][condition][0].append([i, point[0]])
results[direction][condition][1].append([i, point[1]])
i += 1
tot_graph_full_path = graph_full_path + "_" + direction + "_" + condition + ".pdf"
co.plot_line_graph(results[direction][condition], ['Total', 'Reinjections'], ['b', 'r'], 'Connections', 'Number of data bytes', '', tot_graph_full_path, y_log=True)
def fog_plot_cellular_percentage_all(log_file=sys.stdout, limit_duration=0, limit_bytes=0):
fog_base_graph_name_bytes = "fog_cellular_all"
fog_base_graph_path_bytes = os.path.join(sums_dir_exp, fog_base_graph_name_bytes)
color = {'Connections': 'orange'}
data_frac = {'all': {}}
data_bytes = {'all': {}}
for fname, data in connections.iteritems():
app = 'Connections'
for conn_id, conn in data.iteritems():
if app not in data_frac['all']:
data_frac['all'][app] = []
data_bytes['all'][app] = []
            # Only interested in MPTCP connections
if isinstance(conn, mptcp.MPTCPConnection):
if conn.attr[co.DURATION] < limit_duration:
continue
conn_bytes = {'cellular': 0, 'wifi': 0, '?': 0}
if co.BYTES in conn.attr[co.S2D]:
for interface in conn.attr[co.S2D][co.BYTES]:
conn_bytes[interface] += conn.attr[co.S2D][co.BYTES][interface]
if co.BYTES in conn.attr[co.D2S]:
for interface in conn.attr[co.D2S][co.BYTES]:
conn_bytes[interface] += conn.attr[co.D2S][co.BYTES][interface]
for flow_id, flow in conn.flows.iteritems():
if co.S2D not in flow.attr or co.D2S not in flow.attr or co.REINJ_ORIG_BYTES not in flow.attr[co.S2D] or co.REINJ_ORIG_BYTES not in flow.attr[co.D2S]:
break
interface = flow.attr[co.IF]
conn_bytes[interface] -= flow.attr[co.S2D][co.REINJ_ORIG_BYTES]
conn_bytes[interface] -= flow.attr[co.D2S][co.REINJ_ORIG_BYTES]
if conn_bytes['cellular'] + conn_bytes['wifi'] > limit_bytes:
frac_cell = (max(0.0, min(1.0, (conn_bytes['cellular'] + 0.0) / (conn_bytes['cellular'] + conn_bytes['wifi']))))
data_frac['all'][app].append(frac_cell)
data_bytes['all'][app].append(conn_bytes['cellular'] + conn_bytes['wifi'])
data_scatter = {}
for condition in data_bytes:
data_scatter[condition] = {}
for app in data_bytes[condition]:
data_scatter[condition][app] = zip(data_bytes[condition][app], data_frac[condition][app])
co.scatter_plot(data_scatter, "Bytes on connection", "Fraction of bytes on cellular", color, sums_dir_exp, fog_base_graph_path_bytes, plot_identity=False, log_scale_y=False, y_to_one=True, label_order=['Dailymotion', 'Drive', 'Dropbox', 'Facebook', 'Firefox', 'Messenger', 'Spotify', 'Youtube'])
def count_mptcp_best_rtt_flow(log_file=sys.stdout):
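    # For each direction, count the MPTCP connections where the Wi-Fi subflow has a
    # lower average (resp. maximum) RTT than the cellular one, and print the percentages.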
wifi_best_avg_rtt = {'all': {co.S2D: 0, co.D2S: 0}}
cell_best_avg_rtt = {'all': {co.S2D: 0, co.D2S: 0}}
wifi_best_max_rtt = {'all': {co.S2D: 0, co.D2S: 0}}
cell_best_max_rtt = {'all': {co.S2D: 0, co.D2S: 0}}
for fname, data in connections.iteritems():
for conn_id, conn in data.iteritems():
            # Only interested in MPTCP connections
if isinstance(conn, mptcp.MPTCPConnection):
for direction in co.DIRECTIONS:
avg_rtt = {co.WIFI: 100000., co.CELL: 100000.}
max_rtt = {co.WIFI: 100000., co.CELL: 100000.}
for flow_id, flow in conn.flows.iteritems():
if direction not in flow.attr or co.RTT_AVG not in flow.attr[direction] or co.RTT_MAX not in flow.attr[direction]:
continue
avg_rtt[flow.attr[co.IF]] = flow.attr[direction][co.RTT_AVG]
max_rtt[flow.attr[co.IF]] = flow.attr[direction][co.RTT_MAX]
if avg_rtt[co.WIFI] == 100000. and avg_rtt[co.CELL] == 100000.:
continue
if avg_rtt[co.WIFI] <= avg_rtt[co.CELL]:
wifi_best_avg_rtt['all'][direction] += 1
else:
cell_best_avg_rtt['all'][direction] += 1
if max_rtt[co.WIFI] <= max_rtt[co.CELL]:
wifi_best_max_rtt['all'][direction] += 1
else:
cell_best_max_rtt['all'][direction] += 1
for condition in wifi_best_avg_rtt:
for direction in co.DIRECTIONS:
print(condition, direction, (wifi_best_avg_rtt[condition][direction] + 0.) / (wifi_best_avg_rtt[condition][direction] + cell_best_avg_rtt[condition][direction]) * 100, "% where WiFi is better on average", (wifi_best_max_rtt[condition][direction] + 0.0) / (wifi_best_max_rtt[condition][direction] + cell_best_max_rtt[condition][direction]) * 100 , "% where WiFi is better on max", file=log_file)
def time_reinjection(log_file=sys.stdout):
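    # Locate each D2S reinjection within the lifetime of its connection (timestamp
    # normalized by the connection duration, clamped to [0, 1]), plot the CDFs
    # (clamped and raw) and dump suspicious cases to text files in sums_dir_exp.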
location_time = {co.S2D: {'all': {co.REINJ_ORIG_TIMESTAMP: []}}, co.D2S: {'all': {co.REINJ_ORIG_TIMESTAMP: []}}}
location_time_nocorrect = {co.S2D: {'all': {co.REINJ_ORIG_TIMESTAMP: []}}, co.D2S: {'all': {co.REINJ_ORIG_TIMESTAMP: []}}}
reinj_first_sec = []
color = ['red']
graph_fname = "time_reinjection"
base_graph_path = os.path.join(sums_dir_exp, graph_fname)
    # Open the warning/inspection files once for the whole run
    warning_reinj = open(os.path.join(sums_dir_exp, 'warning_reinj.txt'), 'w')
    look_95 = open(os.path.join(sums_dir_exp, 'look95.txt'), 'w')
    look_100 = open(os.path.join(sums_dir_exp, 'look100.txt'), 'w')
    for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
start_time = float('inf')
duration = conn.attr[co.DURATION]
if duration <= 0.001:
continue
start_time = conn.attr.get(co.START, float('inf'))
if start_time == float('inf'):
continue
start_time_int = int(start_time)
start_time_dec = float(str(start_time - start_time_int)[1:])
start_time_dec = ceil(start_time_dec * 1000000) / 1000000.0
for direction in [co.D2S]:
for flow_id, flow in conn.flows.iteritems():
if co.REINJ_ORIG_TIMESTAMP in flow.attr[direction] and co.START in flow.attr:
for ts in flow.attr[direction][co.REINJ_ORIG_TIMESTAMP]:
                                # Small tricks to avoid floating-point rounding errors
ts_int = int(ts)
ts_dec = float(str(ts - ts_int)[1:])
ts_dec = ceil(ts_dec * 1000000) / 1000000.0
ts_dec_delta = ts_dec - start_time_dec
ts_fix = ts_int - start_time_int + ts_dec_delta
location_time[direction]['all'][co.REINJ_ORIG_TIMESTAMP].append(max(min(ts_fix / duration, 1.0), 0.0))
location_time_nocorrect[direction]['all'][co.REINJ_ORIG_TIMESTAMP].append(ts_fix / duration)
                                if direction == co.D2S and (ts_fix / duration < 0.0 or ts_fix / duration > 1.0):
print(fname, conn_id, flow_id, ts_fix / duration, ts, start_time, ts_fix, duration, file=warning_reinj)
if direction == co.D2S and ts_fix <= 1.0:
reinj_first_sec.append((conn_id, flow_id))
if direction == co.D2S and ts_fix / duration >= 0.92 and ts_fix / duration <= 0.97:
print(fname, conn_id, flow_id, ts_fix / duration, ts, start_time, ts_fix, duration, file=look_95)
if direction == co.D2S and ts_fix / duration >= 0.99:
print(fname, conn_id, flow_id, ts_fix / duration, ts, start_time, ts_fix, duration, file=look_100)
co.plot_cdfs_with_direction(location_time, color, 'Fraction of connection duration', base_graph_path, natural=True)
co.plot_cdfs_with_direction(location_time_nocorrect, color, 'Fraction of connection duration', base_graph_path + '_nocorrect', natural=True)
print(reinj_first_sec, file=log_file)
print(len(reinj_first_sec), "reinjections in 1 second", file=log_file)
warning_reinj.close()
look_95.close()
look_100.close()
def time_retransmission(log_file=sys.stdout):
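    # Same idea as time_reinjection but for retransmission timestamps in both
    # directions, shifted by the subflow start time before normalization.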
location_time = {co.S2D: {'all': {co.TIMESTAMP_RETRANS: []}}, co.D2S: {'all': {co.TIMESTAMP_RETRANS: []}}}
location_time_no_correct = {co.S2D: {'all': {co.TIMESTAMP_RETRANS: []}}, co.D2S: {'all': {co.TIMESTAMP_RETRANS: []}}}
color = ['red']
graph_fname = "time_retransmission"
base_graph_path = os.path.join(sums_dir_exp, graph_fname)
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
start_time = float('inf')
duration = conn.attr[co.DURATION]
if duration <= 0.001:
continue
start_time = conn.attr.get(co.START, float('inf'))
for direction in co.DIRECTIONS:
for flow_id, flow in conn.flows.iteritems():
if co.TIMESTAMP_RETRANS in flow.attr[direction] and co.START in flow.attr:
start_flow_time = flow.attr[co.START]
time_diff = start_flow_time - start_time
for ts in flow.attr[direction][co.TIMESTAMP_RETRANS]:
location_time[direction]['all'][co.TIMESTAMP_RETRANS].append(min((ts + time_diff) / duration, 1.0))
location_time_no_correct[direction]['all'][co.TIMESTAMP_RETRANS].append((ts + time_diff) / duration)
                                if direction == co.D2S and ((ts + time_diff) / duration < 0.0 or (ts + time_diff) / duration > 1.0):
print("WARNING retrans", fname, conn_id, flow_id, ts / duration, file=log_file)
co.plot_cdfs_with_direction(location_time, color, 'Fraction of connection duration', base_graph_path, natural=True, xlim=1.0)
    co.plot_cdfs_with_direction(location_time_no_correct, color, 'Fraction of connection duration', base_graph_path + '_nocorrect', natural=True)
def merge_time_reinjection_retransmission(log_file=sys.stdout):
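    # Combine the reinjection (D2S) and retransmission (both directions) timing
    # analyses in a single CDF of the normalized position within the connection, and
    # count retransmissions occurring near the end (>= 99% of the duration) together
    # with how many of those belong to connections shorter than 3 seconds.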
location_time = {co.S2D: {'all': {"Reinjections": [], "Retransmissions": []}}, co.D2S: {'all': {"Reinjections": [], "Retransmissions": []}}}
location_time_nocorrect = {co.S2D: {'all': {"Reinjections": [], "Retransmissions": []}}, co.D2S: {'all': {"Reinjections": [], "Retransmissions": []}}}
reinj_first_sec = []
color = ['red', 'blue']
graph_fname = "merge_time_reinjection_retranmission"
base_graph_path = os.path.join(sums_dir_exp, graph_fname)
count_duration = {co.S2D: 0, co.D2S: 0}
count_low_duration = {co.S2D: 0, co.D2S: 0}
    # Open the warning/inspection files once for the whole run
    warning_reinj = open(os.path.join(sums_dir_exp, 'warning_reinj.txt'), 'w')
    look_95 = open(os.path.join(sums_dir_exp, 'look95.txt'), 'w')
    look_100 = open(os.path.join(sums_dir_exp, 'look100.txt'), 'w')
    warning_retrans = open(os.path.join(sums_dir_exp, 'warning_retrans.txt'), 'w')
    for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
start_time = float('inf')
duration = conn.attr[co.DURATION]
if duration <= 0.001:
continue
start_time = conn.attr.get(co.START, float('inf'))
if start_time == float('inf'):
continue
start_time_int = int(start_time)
start_time_dec = float(str(start_time - start_time_int)[1:])
start_time_dec = ceil(start_time_dec * 1000000) / 1000000.0
for direction in [co.D2S]:
for flow_id, flow in conn.flows.iteritems():
if co.REINJ_ORIG_TIMESTAMP in flow.attr[direction] and co.START in flow.attr:
for ts in flow.attr[direction][co.REINJ_ORIG_TIMESTAMP]:
# Avoid floating point errors: split the timestamps into integer and
# fractional parts, round the fractional parts to microsecond precision
# and subtract the parts separately
ts_int = int(ts)
ts_dec = float(str(ts - ts_int)[1:])
ts_dec = ceil(ts_dec * 1000000) / 1000000.0
ts_dec_delta = ts_dec - start_time_dec
ts_fix = ts_int - start_time_int + ts_dec_delta
location_time[direction]['all']["Reinjections"].append(max(min(ts_fix / duration, 1.0), 0.0))
location_time_nocorrect[direction]['all']["Reinjections"].append(ts_fix / duration)
if direction == co.D2S and (ts_fix / duration < 0.0 or ts_fix / duration > 1.0):
print(fname, conn_id, flow_id, ts_fix / duration, ts, start_time, ts_fix, duration, file=warning_reinj)
if direction == co.D2S and ts_fix <= 1.0:
reinj_first_sec.append((conn_id, flow_id))
if direction == co.D2S and ts_fix / duration >= 0.92 and ts_fix / duration <= 0.97:
print(fname, conn_id, flow_id, ts_fix / duration, ts, start_time, ts_fix, duration, file=look_95)
if direction == co.D2S and ts_fix / duration >= 0.99:
print("LOOK 100", fname, conn_id, flow_id, ts_fix / duration, ts, start_time, ts_fix, duration, file=log_file)
start_time = float('inf')
duration = conn.attr[co.DURATION]
if duration <= 0.001:
continue
start_time = conn.attr.get(co.START, float('inf'))
for direction in co.DIRECTIONS:
for flow_id, flow in conn.flows.iteritems():
if co.TIMESTAMP_RETRANS in flow.attr[direction] and co.START in flow.attr:
start_flow_time = flow.attr[co.START]
time_diff = start_flow_time - start_time
for ts in flow.attr[direction][co.TIMESTAMP_RETRANS]:
location_time[direction]['all']["Retransmissions"].append(max(min((ts + time_diff) / duration, 1.0), 0.0))
location_time_nocorrect[direction]['all']["Retransmissions"].append((ts + time_diff) / duration)
if direction == co.D2S and (ts + time_diff) / duration >= 0.99:
print("LOOK RETRANS", fname, conn_id, flow_id, duration, (ts + time_diff) / duration, file=log_file)
count_duration[direction] += 1
if duration < 3.0:
count_low_duration[direction] += 1
# if direction == co.D2S and (ts + time_diff) / duration < 0.0 or (ts + time_diff) / duration > 1.0:
# print(fname, conn_id, flow_id, ts / duration, file=warning_retrans)
co.plot_cdfs_with_direction(location_time, color, 'Fraction of connection duration', base_graph_path, natural=True)
co.plot_cdfs_with_direction(location_time_nocorrect, color, 'Fraction of connection duration', base_graph_path + '_nocorrect', natural=True)
print(reinj_first_sec, file=log_file)
print(len(reinj_first_sec), "reinjections in 1 second", file=log_file)
warning_reinj.close()
look_95.close()
look_100.close()
warning_retrans.close()
for direction in co.DIRECTIONS:
print("DURATION", count_duration[direction], count_low_duration[direction], file=log_file)
def total_retrans_reinj(log_file=sys.stdout):
reinject = {co.S2D: 0, co.D2S: 0}
reinject_packs = {co.S2D: 0, co.D2S: 0}
retrans = {co.S2D: 0, co.D2S: 0}
retrans_packs = {co.S2D: 0, co.D2S: 0}
reinject_list = {}
for fname, conns in connections.iteritems():
reinject_list[fname] = []
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
for flow_id, flow in conn.flows.iteritems():
added = False
for direction in co.DIRECTIONS:
if direction in flow.attr:
if flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0) > 0:
if not added:
reinject_list[fname].append(conn_id)
added = True
reinject[direction] += flow.attr[direction].get(co.REINJ_ORIG_BYTES, 0)
reinject_packs[direction] += flow.attr[direction].get(co.REINJ_ORIG_PACKS, 0)
if flow.attr[direction].get(co.BYTES_RETRANS, 0) > 0:
retrans[direction] += flow.attr[direction].get(co.BYTES_RETRANS, 0)
retrans_packs[direction] += flow.attr[direction].get(co.PACKS_RETRANS, 0)
for direction in co.DIRECTIONS:
print("REINJECT", direction, reinject[direction], file=log_file)
print("REINJECT PACKS", direction, reinject_packs[direction], file=log_file)
print("RETRANS", direction, retrans[direction], file=log_file)
print("RETRANS PACKS", direction, retrans_packs[direction], file=log_file)
print("LIST OF REINJECTION CONNECTIONS", file=log_file)
print(reinject_list, file=log_file)
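# Burstiness of multi-subflow connections carrying at least 1 MB: number of
# subflow switches per MB and per second, and packets per burst (plain,
# cut and CCDF variants of each plot).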
def bursts_mptcp(log_file=sys.stdout):
bursts_mb = {co.S2D: {'all': {'Connections': []}}, co.D2S: {'all': {'Connections': []}}}
bursts_sec = {co.S2D: {'all': {'Connections': []}}, co.D2S: {'all': {'Connections': []}}}
bursts_pck = {co.S2D: {'all': {'Connections': []}}, co.D2S: {'all': {'Connections': []}}}
color = ['red']
graph_fname_mb = "bursts_mb"
base_graph_path_mb = os.path.join(sums_dir_exp, graph_fname_mb)
graph_fname_sec = "bursts_sec"
base_graph_path_sec = os.path.join(sums_dir_exp, graph_fname_sec)
graph_fname_pck = "packs_bursts"
base_graph_path_pck = os.path.join(sums_dir_exp, graph_fname_pck)
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
if conn.attr[co.DURATION] > 0.0 and len(conn.flows) >= 2:
duration = conn.attr[co.DURATION]
if duration == 0.0:
continue
for direction in co.DIRECTIONS:
if conn.attr[direction].get(co.BYTES_MPTCPTRACE, 0) < 1000000:
continue
nb_packs = 0
for flow_id, flow in conn.flows.iteritems():
nb_packs += flow.attr[direction].get(co.PACKS, 0)
if conn.attr[direction][co.BYTES_MPTCPTRACE] > 1 and co.BURSTS in conn.attr[direction] and len(conn.attr[direction][co.BURSTS]) > 0:
tot_bytes = conn.attr[direction][co.BYTES_MPTCPTRACE] / 1000000.0 # For MBytes
bursts_mb[direction]['all']['Connections'].append((len(conn.attr[direction][co.BURSTS]) - 1.0) / tot_bytes)
bursts_sec[direction]['all']['Connections'].append((len(conn.attr[direction][co.BURSTS]) - 1.0) / duration)
bursts_pck[direction]['all']['Connections'].append(nb_packs / len(conn.attr[direction][co.BURSTS]))
co.plot_cdfs_with_direction(bursts_mb, color, '# switches / MB of data', base_graph_path_mb, natural=True)
co.plot_cdfs_with_direction(bursts_sec, color, '# switches / second', base_graph_path_sec, natural=True)
co.plot_cdfs_with_direction(bursts_pck, color, '# packets / # bursts', base_graph_path_pck, natural=True)
co.plot_cdfs_with_direction(bursts_mb, color, '# switches / MB of data', base_graph_path_mb + "_cut", xlim=5000, natural=True)
co.plot_cdfs_with_direction(bursts_sec, color, '# switches / second', base_graph_path_sec + "_cut", xlim=200, natural=True)
co.plot_cdfs_with_direction(bursts_pck, color, '# packets / # bursts', base_graph_path_pck + "_cut", xlim=1000, natural=True)
co.plot_cdfs_with_direction(bursts_mb, color, '# switches / MB of data', base_graph_path_mb + "_ccdf", natural=True, xlog=True, ylog=True, ccdf=True)
co.plot_cdfs_with_direction(bursts_sec, color, '# switches / second', base_graph_path_sec + "_ccdf", natural=True, xlog=True, ylog=True, ccdf=True)
co.plot_cdfs_with_direction(bursts_pck, color, '# packets / # bursts', base_graph_path_pck + "_ccdf", natural=True, xlog=True, ylog=True, ccdf=True)
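# Simple handover heuristic: list connections where an additional subflow
# starts at least 2 seconds after the earliest subflow.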
def detect_handover(log_file=sys.stdout):
handover_conns = {}
for fname, conns in connections.iteritems():
handover_conns[fname] = []
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
start_time = float('inf')
for flow_id, flow in conn.flows.iteritems():
if co.START not in flow.attr:
continue
start_time = min(start_time, flow.attr[co.START])
for flow_id, flow in conn.flows.iteritems():
if co.START in flow.attr and flow.attr[co.START] - start_time >= 2.0:
handover_conns[fname].append(conn_id)
print(handover_conns, file=log_file)
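# CDF of the delay between the initial subflow (MP_CAPABLE) and additional
# subflows (MP_JOIN) on multiflow connections, then quantify how many bytes
# handover connections carry on their initial subflow(s) versus in total.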
def delay_mpcapable_mpjoin_quantify_handover(log_file=sys.stdout, threshold_handover=2.0):
syn_additional_sfs = []
handover_conns = {}
# Look only at multiple subflows connections
for fname, conns in multiflow_connections.iteritems():
handover_conns[fname] = {}
for conn_id, conn in conns.iteritems():
# First find initial subflow timestamp
initial_sf_ts = float('inf')
for flow_id, flow in conn.flows.iteritems():
if co.START not in flow.attr:
continue
if flow.attr[co.START] < initial_sf_ts:
initial_sf_ts = flow.attr[co.START]
if initial_sf_ts == float('inf'):
continue
# Now store the delta and record connections with handover
handover_detected = False
for flow_id, flow in conn.flows.iteritems():
if co.START not in flow.attr:
continue
delta = flow.attr[co.START] - initial_sf_ts
if delta > 0.0:
syn_additional_sfs.append(delta)
if delta >= threshold_handover and not handover_detected:
handover_detected = True
handover_conns[fname][conn_id] = conn
if delta >= 50000:
print("HUGE DELTA", fname, conn_id, flow_id, delta, file=log_file)
# Do a first CDF plot of the delta between initial SYN and additional ones
base_graph_path = os.path.join(sums_dir_exp, 'cdf_delta_additional_syns')
co.plot_cdfs_natural({'multiflow': {'delta': syn_additional_sfs}}, ['red'], 'Seconds', base_graph_path + '_log', xlog=True)
co.plot_cdfs_natural({'multiflow': {'delta': syn_additional_sfs}}, ['red'], 'Seconds', base_graph_path)
co.plot_cdfs_natural({'multiflow': {'delta': syn_additional_sfs}}, ['red'], 'Seconds', base_graph_path + '_cut', xlim=5.0)
co.plot_cdfs_natural({'multiflow': {'delta': syn_additional_sfs}}, ['red'], 'Seconds', base_graph_path + '_cut_15', xlim=15.0)
# Now quantify in handover connections the amount of data not on the initial subflows
bytes_init_sf = 0.0
bytes_init_sfs = 0.0
bytes_total = 0.0
for fname, conns in handover_conns.iteritems():
for conn_id, conn in conns.iteritems():
# First find initial subflow timestamp
initial_sf_ts = float('inf')
for flow_id, flow in conn.flows.iteritems():
if co.START not in flow.attr:
continue
if flow.attr[co.START] < initial_sf_ts:
initial_sf_ts = flow.attr[co.START]
# Now collect the amount of data on all subflows
for flow_id, flow in conn.flows.iteritems():
if co.START not in flow.attr:
continue
delta = flow.attr[co.START] - initial_sf_ts
for direction in co.DIRECTIONS:
bytes_total += flow.attr[direction].get(co.BYTES, 0)
if bytes_total >= 1000000000:
print("WARNING!!!", fname, conn_id, flow_id, bytes_total, file=log_file)
if delta < threshold_handover:
# Initial subflows
bytes_init_sfs += flow.attr[direction].get(co.BYTES, 0)
if delta == 0.0:
# Initial subflow
bytes_init_sf += flow.attr[direction].get(co.BYTES, 0)
# Log those values in the log file
print("QUANTIFY HANDOVER", file=log_file)
print(bytes_init_sf, "BYTES ON INIT SF", bytes_init_sf * 100 / bytes_total, "%", file=log_file)
print(bytes_init_sfs, "BYTES ON INIT SFS", bytes_init_sfs * 100 / bytes_total, "%", file=log_file)
print("TOTAL BYTES", bytes_total, file=log_file)
def table_rtt_d2s(log_file=sys.stdout):
MPTCP = "MPTCP"
TCP = "TCP"
rtt_min = {MPTCP: [], TCP: []}
rtt_med = {MPTCP: [], TCP: []}
rtt_avg = {MPTCP: [], TCP: []}
rtt_75 = {MPTCP: [], TCP: []}
rtt_90 = {MPTCP: [], TCP: []}
rtt_95 = {MPTCP: [], TCP: []}
rtt_97 = {MPTCP: [], TCP: []}
rtt_98 = {MPTCP: [], TCP: []}
rtt_99 = {MPTCP: [], TCP: []}
rtt_max = {MPTCP: [], TCP: []}
rtt_diff = {MPTCP: [], TCP: []}
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
if conn.attr[co.D2S].get(co.BYTES_MPTCPTRACE, 0) < 1000000:
continue
data_mptcp = conn.attr[co.D2S]
if co.RTT_MIN in data_mptcp and co.RTT_AVG in data_mptcp and co.RTT_MED in data_mptcp and co.RTT_99P in data_mptcp:
rtt_min[MPTCP].append(data_mptcp[co.RTT_MIN])
rtt_med[MPTCP].append(data_mptcp[co.RTT_MED])
rtt_avg[MPTCP].append(data_mptcp[co.RTT_AVG])
rtt_75[MPTCP].append(data_mptcp[co.RTT_75P])
rtt_90[MPTCP].append(data_mptcp[co.RTT_90P])
rtt_95[MPTCP].append(data_mptcp[co.RTT_95P])
rtt_97[MPTCP].append(data_mptcp[co.RTT_97P])
rtt_98[MPTCP].append(data_mptcp[co.RTT_98P])
rtt_99[MPTCP].append(data_mptcp[co.RTT_99P])
rtt_max[MPTCP].append(data_mptcp[co.RTT_MAX])
rtt_diff[MPTCP].append(data_mptcp[co.RTT_MAX] - data_mptcp[co.RTT_MIN])
for flow_id, flow in conn.flows.iteritems():
if flow.attr[co.D2S].get(co.BYTES, 0) < 250000:
continue
data = flow.attr[co.D2S]
if co.RTT_MIN in data and co.RTT_AVG in data and co.RTT_MED in data and co.RTT_99P in data:
rtt_min[TCP].append(data[co.RTT_MIN])
rtt_med[TCP].append(data[co.RTT_MED])
rtt_avg[TCP].append(data[co.RTT_AVG])
rtt_75[TCP].append(data[co.RTT_75P])
rtt_90[TCP].append(data[co.RTT_90P])
rtt_95[TCP].append(data[co.RTT_95P])
rtt_97[TCP].append(data[co.RTT_97P])
rtt_98[TCP].append(data[co.RTT_98P])
rtt_99[TCP].append(data[co.RTT_99P])
rtt_max[TCP].append(data[co.RTT_MAX])
rtt_diff[TCP].append(data[co.RTT_MAX] - data[co.RTT_MIN])
print("TABLE RTT", file=log_file)
print("\hline", file=log_file)
print("Protocol & Min & Med & Avg & 75^{th} & 90^{th} & 95^{th} & 97^{th} & 98^{th} & 99^{th} & Max & Max - Min \\ ", file=log_file)
print("\hline", file=log_file)
print("\hline", file=log_file)
for protocol in [MPTCP, TCP]:
print(protocol, "&", np.mean(rtt_min[protocol]), "&", np.mean(rtt_med[protocol]), "&", np.mean(rtt_avg[protocol]), "&", np.mean(rtt_75[protocol]), "&", np.mean(rtt_90[protocol]), "&", np.mean(rtt_95[protocol]), "&", np.mean(rtt_97[protocol]), "&", np.mean(rtt_98[protocol]), "&", np.mean(rtt_99[protocol]), "&", np.mean(rtt_max[protocol]), "&", np.mean(rtt_diff[protocol]), "\\", file=log_file)
print(protocol, "&", np.median(rtt_min[protocol]), "&", np.median(rtt_med[protocol]), "&", np.median(rtt_avg[protocol]), "&", np.median(rtt_75[protocol]), "&", np.median(rtt_90[protocol]), "&", np.median(rtt_95[protocol]), "&", np.median(rtt_97[protocol]), "&", np.median(rtt_98[protocol]), "&", np.median(rtt_99[protocol]), "&", np.median(rtt_max[protocol]), "&", np.median(rtt_diff[protocol]), "\\", file=log_file)
print(protocol, "&", np.percentile(rtt_min[protocol], 75), "&", np.percentile(rtt_med[protocol], 75), "&", np.percentile(rtt_avg[protocol], 75), "&", np.percentile(rtt_75[protocol], 75), "&", np.percentile(rtt_90[protocol], 75), "&", np.percentile(rtt_95[protocol], 75), "&", np.percentile(rtt_97[protocol], 75), "&", np.percentile(rtt_98[protocol], 75), "&", np.percentile(rtt_99[protocol], 75), "&", np.percentile(rtt_max[protocol], 75), "&", np.percentile(rtt_diff[protocol], 75), "\\", file=log_file)
print(protocol, "&", np.percentile(rtt_min[protocol], 90), "&", np.percentile(rtt_med[protocol], 90), "&", np.percentile(rtt_avg[protocol], 90), "&", np.percentile(rtt_75[protocol], 90), "&", np.percentile(rtt_90[protocol], 90), "&", np.percentile(rtt_95[protocol], 90), "&", np.percentile(rtt_97[protocol], 90), "&", np.percentile(rtt_98[protocol], 90), "&", np.percentile(rtt_99[protocol], 90), "&", np.percentile(rtt_max[protocol], 90), "&", np.percentile(rtt_diff[protocol], 90), "\\", file=log_file)
print(protocol, "&", np.percentile(rtt_min[protocol], 95), "&", np.percentile(rtt_med[protocol], 95), "&", np.percentile(rtt_avg[protocol], 95), "&", np.percentile(rtt_75[protocol], 95), "&", np.percentile(rtt_90[protocol], 95), "&", np.percentile(rtt_95[protocol], 95), "&", np.percentile(rtt_97[protocol], 95), "&", np.percentile(rtt_98[protocol], 95), "&", np.percentile(rtt_99[protocol], 95), "&", np.percentile(rtt_max[protocol], 95), "&", np.percentile(rtt_diff[protocol], 95), "\\", file=log_file)
print("\hline", file=log_file)
def plot_rtt_d2s(log_file=sys.stdout):
rtt_min = []
rtt_avg = []
rtt_max = []
rtt_diff = []
graph_fname_rtt = "rtt_d2s"
base_graph_path_rtt = os.path.join(sums_dir_exp, graph_fname_rtt)
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
# We never know, still check
if isinstance(conn, mptcp.MPTCPConnection):
count_flow = 0
max_flow = 0.0
min_flow = float('inf')
for flow_id, flow in conn.flows.iteritems():
if flow.attr[co.D2S].get(co.BYTES, 0) < 100000:
continue
data = flow.attr[co.D2S]
if co.RTT_MIN in data and co.RTT_AVG in data:
rtt_min.append(data[co.RTT_MIN])
rtt_avg.append(data[co.RTT_AVG])
rtt_max.append(data[co.RTT_MAX])
count_flow += 1
max_flow = max(max_flow, data[co.RTT_AVG])
min_flow = min(min_flow, data[co.RTT_AVG])
if data[co.RTT_MIN] < 1.0:
print("LOW RTT", fname, conn_id, flow_id, data[co.RTT_MIN], data[co.RTT_AVG], data[co.RTT_MAX], flow.attr[co.D2S].get(co.RTT_3WHS, 0), flow.attr[co.D2S].get(co.BYTES, 0), flow.attr[co.D2S].get(co.RTT_SAMPLES, 0), flow.attr[co.DADDR], file=log_file)
if count_flow >= 2:
rtt_diff.append(max_flow - min_flow)
co.plot_cdfs_natural({'all': {"min RTT": rtt_min, "avg RTT": rtt_avg, "max RTT": rtt_max, "max RTT - min RTT": rtt_diff}}, ['red', 'green', 'blue', 'magenta'], "RTT of subflows larger than 100KB (ms)", base_graph_path_rtt, label_order=["min RTT", "avg RTT", "max RTT", "max RTT - min RTT"], xlog=True)
millis = int(round(time.time() * 1000))
log_file = open(os.path.join(sums_dir_exp, 'log_summary-' + str(millis) + '.txt'), 'w')
print("Summary plots", file=log_file)
# fog_plot_with_bytes_wifi_cell_per_condition(log_file=log_file)
# fog_plot_with_packs_wifi_cell_per_condition(log_file=log_file)
# fog_duration_bytes(log_file=log_file)
cdf_duration(log_file=log_file)
cdfs_bytes(log_file=log_file)
cdf_number_subflows(log_file=log_file)
textual_summary(log_file=log_file)
box_plot_cellular_percentage(log_file=log_file, limit_bytes=0)
cdf_bytes_all(log_file=log_file)
# cdf_rtt_s2d_all(log_file=log_file, min_samples=5)
cdf_rtt_d2s_all(log_file=log_file, min_samples=5)
# reinject_plot(log_file=log_file, min_bytes=9999.9)
# reinject_plot_relative_to_data(log_file=log_file, min_bytes=9999.9)
# retrans_plot(log_file=log_file)
# fog_plot_cellular_percentage_rtt_wifi(log_file=log_file)
# textual_summary_global(log_file=log_file)
cdf_overhead_retrans_reinj(log_file=log_file)
cdf_overhead_retrans_reinj_singleflow(log_file=log_file)
plot_total_bytes_reinj_bytes(log_file=log_file)
fog_plot_cellular_percentage_all(log_file=log_file)
count_mptcp_best_rtt_flow(log_file=log_file)
count_ip_type(log_file=log_file)
count_packet(log_file=log_file)
count_ports(log_file=log_file)
count_ports_mptcp(log_file=log_file)
count_on_filtered(log_file=log_file)
# time_reinjection(log_file=log_file)
# time_retransmission(log_file=log_file)
merge_time_reinjection_retransmission(log_file=log_file)
bursts_mptcp(log_file=log_file)
detect_handover(log_file=log_file)
list_bytes_all(log_file=log_file)
difference_rtt_d2s(log_file=log_file)
delay_mpcapable_mpjoin_quantify_handover(log_file=log_file, threshold_handover=1.0)
count_unused_subflows(log_file=log_file)
total_retrans_reinj(log_file=log_file)
# table_rtt_d2s(log_file=log_file)
plot_rtt_d2s(log_file=log_file)
log_file.close()
print("End of summary")
| gpl-3.0 |
rs2/pandas | pandas/tests/tools/test_to_datetime.py | 1 | 90242 | """ test to_datetime """
import calendar
from collections import deque
from datetime import datetime, timedelta
import locale
from dateutil.parser import parse
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import tslib
from pandas._libs.tslibs import iNaT, parsing
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_datetime64_ns_dtype
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
NaT,
Series,
Timestamp,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
from pandas.core.tools import datetimes as tools
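# Tests for to_datetime() with an explicit format= string.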
class TestTimeConversionFormats:
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format(self, cache):
values = ["1/1/2000", "1/2/2000", "1/3/2000"]
results1 = [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")]
results2 = [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")]
for vals, expecteds in [
(values, (Index(results1), Index(results2))),
(Series(values), (Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])),
]:
for i, fmt in enumerate(["%d/%m/%Y", "%m/%d/%Y"]):
result = to_datetime(vals, format=fmt, cache=cache)
expected = expecteds[i]
if isinstance(expected, Series):
tm.assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_YYYYMMDD(self, cache):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# with NaT
expected = Series(
[Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = "nat"
result = to_datetime(s, format="%Y%m%d", cache=cache)
tm.assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = pd.to_datetime(s, format="%Y%m%d", errors="ignore", cache=cache)
expected = Series(
[datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object,
)
tm.assert_series_equal(result, expected)
result = pd.to_datetime(s, format="%Y%m%d", errors="coerce", cache=cache)
expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s",
[
# Null values with Strings
["19801222", "20010112", None],
["19801222", "20010112", np.nan],
["19801222", "20010112", pd.NaT],
["19801222", "20010112", "NaT"],
# Null values with Integers
[19801222, 20010112, None],
[19801222, 20010112, np.nan],
[19801222, 20010112, pd.NaT],
[19801222, 20010112, "NaT"],
],
)
def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):
# GH 30011
# format='%Y%m%d'
# with None
expected = Series([Timestamp("19801222"), Timestamp("20010112"), pd.NaT])
result = Series(pd.to_datetime(input_s, format="%Y%m%d"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"input_s, expected",
[
# NaN before strings with invalid date values
[
Series(["19801222", np.nan, "20010012", "10019999"]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN after strings with invalid date values
[
Series(["19801222", "20010012", "10019999", np.nan]),
Series([Timestamp("19801222"), np.nan, np.nan, np.nan]),
],
# NaN before integers with invalid date values
[
Series([20190813, np.nan, 20010012, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
# NaN after integers with invalid date values
[
Series([20190813, 20010012, np.nan, 20019999]),
Series([Timestamp("20190813"), np.nan, np.nan, np.nan]),
],
],
)
def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):
# GH 25512
# format='%Y%m%d', errors='coerce'
result = pd.to_datetime(input_s, format="%Y%m%d", errors="coerce")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_integer(self, cache):
# GH 10178
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format="%Y", cache=cache)
tm.assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in s.apply(str)])
result = to_datetime(s, format="%Y%m", cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"int_date, expected",
[
# valid date, length == 8
[20121030, datetime(2012, 10, 30)],
# short valid date, length == 6
[199934, datetime(1999, 3, 4)],
# long integer date partially parsed to datetime(2012,1,1), length > 8
[2012010101, 2012010101],
# invalid date partially parsed to datetime(2012,9,9), length == 8
[20129930, 20129930],
# short integer date partially parsed to datetime(2012,9,9), length < 8
[2012993, 2012993],
# short invalid date, length == 4
[2121, 2121],
],
)
def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected):
# GH 26583
result = to_datetime(int_date, format="%Y%m%d", errors="ignore")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = f"01-{month_abbr}-2011 00:00:01.978"
format = "%d-%b-%Y %H:%M:%S.%f"
result = to_datetime(val, format=format, cache=cache)
exp = datetime.strptime(val, format)
assert result == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_time(self, cache):
data = [
["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")],
["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")],
[
"01/10/2010 13:56:01",
"%m/%d/%Y %H:%M:%S",
Timestamp("2010-01-10 13:56:01"),
] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_non_exact(self, cache):
# GH 10834
# 8904
# exact kw
s = Series(
["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"]
)
result = to_datetime(s, format="%d%b%y", exact=False, cache=cache)
expected = to_datetime(
s.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_parse_nanoseconds_with_formula(self, cache):
# GH8989
# truncating the nanoseconds when a format was provided
for v in [
"2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000",
]:
expected = pd.to_datetime(v, cache=cache)
result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f", cache=cache)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_format_weeks(self, cache):
data = [
["2009324", "%Y%W%w", Timestamp("2009-08-13")],
["2013020", "%Y%U%w", Timestamp("2013-01-13")],
]
for s, format, dt in data:
assert to_datetime(s, format=format, cache=cache) == dt
@pytest.mark.parametrize(
"fmt,dates,expected_dates",
[
[
"%Y-%m-%d %H:%M:%S %Z",
["2010-01-01 12:00:00 UTC"] * 2,
[pd.Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2,
],
[
"%Y-%m-%d %H:%M:%S %Z",
[
"2010-01-01 12:00:00 UTC",
"2010-01-01 12:00:00 GMT",
"2010-01-01 12:00:00 US/Pacific",
],
[
pd.Timestamp("2010-01-01 12:00:00", tz="UTC"),
pd.Timestamp("2010-01-01 12:00:00", tz="GMT"),
pd.Timestamp("2010-01-01 12:00:00", tz="US/Pacific"),
],
],
[
"%Y-%m-%d %H:%M:%S%z",
["2010-01-01 12:00:00+0100"] * 2,
[pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100"] * 2,
[pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60))] * 2,
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"],
[
pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(60)),
pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(-60)),
],
],
[
"%Y-%m-%d %H:%M:%S %z",
["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"],
[
pd.Timestamp(
"2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)
), # pytz coerces to UTC
pd.Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)),
],
],
],
)
def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates):
# GH 13486
result = pd.to_datetime(dates, format=fmt)
expected = pd.Index(expected_dates)
tm.assert_equal(result, expected)
def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self):
# GH 32792
dates = [
"2010-01-01 12:00:00 +0100",
"2010-01-01 12:00:00 -0100",
"2010-01-01 12:00:00 +0300",
"2010-01-01 12:00:00 +0400",
]
expected_dates = [
"2010-01-01 11:00:00+00:00",
"2010-01-01 13:00:00+00:00",
"2010-01-01 09:00:00+00:00",
"2010-01-01 08:00:00+00:00",
]
fmt = "%Y-%m-%d %H:%M:%S %z"
result = pd.to_datetime(dates, format=fmt, utc=True)
expected = pd.DatetimeIndex(expected_dates)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""]
)
def test_to_datetime_parse_timezone_malformed(self, offset):
fmt = "%Y-%m-%d %H:%M:%S %z"
date = "2010-01-01 12:00:00 " + offset
with pytest.raises(ValueError):
pd.to_datetime([date], format=fmt)
def test_to_datetime_parse_timezone_keeps_name(self):
# GH 21697
fmt = "%Y-%m-%d %H:%M:%S %z"
arg = pd.Index(["2010-01-01 12:00:00 Z"], name="foo")
result = pd.to_datetime(arg, format=fmt)
expected = pd.DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo")
tm.assert_index_equal(result, expected)
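# General to_datetime() behaviour: ISO week/year directives, scalars and
# array-likes, caching, timezone handling and the error modes.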
class TestToDatetime:
@pytest.mark.parametrize(
"s, _format, dt",
[
["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)],
["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)],
["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)],
],
)
def test_to_datetime_iso_week_year_format(self, s, _format, dt):
# See GH#16607
assert to_datetime(s, format=_format) == dt
@pytest.mark.parametrize(
"msg, s, _format",
[
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 50",
"%Y %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 51",
"%G %V",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Monday",
"%G %A",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 Mon",
"%G %a",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %w",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"1999 6",
"%G %u",
],
[
"ISO year directive '%G' must be used with the ISO week directive "
"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.",
"2051",
"%G",
],
[
"Day of the year directive '%j' is not compatible with ISO year "
"directive '%G'. Use '%Y' instead.",
"1999 51 6 256",
"%G %V %u %j",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sunday",
"%Y %V %A",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 Sun",
"%Y %V %a",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %w",
],
[
"ISO week directive '%V' is incompatible with the year directive "
"'%Y'. Use the ISO year '%G' instead.",
"1999 51 1",
"%Y %V %u",
],
[
"ISO week directive '%V' must be used with the ISO year directive "
"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.",
"20",
"%V",
],
],
)
def test_error_iso_week_year(self, msg, s, _format):
# See GH#16607
# This test checks for errors thrown when giving the wrong format
# However, as discussed on PR#25541, overriding the locale
# causes a different error to be thrown due to the format being
# locale specific, but the test data is in English.
# Therefore, the tests only run when locale is not overwritten,
# as a sort of solution to this problem.
if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != (
"it_IT",
"UTF-8",
):
with pytest.raises(ValueError, match=msg):
to_datetime(s, format=_format)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_to_datetime_dtarr(self, tz):
# DatetimeArray
dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz)
arr = DatetimeArray(dti)
result = to_datetime(arr)
assert result is arr
result = to_datetime(arr)
assert result is arr
def test_to_datetime_pydatetime(self):
actual = pd.to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
def test_to_datetime_YYYYMMDD(self):
actual = pd.to_datetime("20080115")
assert actual == datetime(2008, 1, 15)
def test_to_datetime_unparseable_ignore(self):
# unparseable
s = "Month 1, 1999"
assert pd.to_datetime(s, errors="ignore") == s
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_now(self):
# See GH#18666
with tm.set_timezone("US/Eastern"):
npnow = np.datetime64("now").astype("datetime64[ns]")
pdnow = pd.to_datetime("now")
pdnow2 = pd.to_datetime(["now"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10
assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10
assert pdnow.tzinfo is None
assert pdnow2.tzinfo is None
@td.skip_if_windows # `tm.set_timezone` does not work in windows
def test_to_datetime_today(self):
# See GH#18666
# Test with one timezone far ahead of UTC and another far behind, so
# one of these will _almost_ always be in a different day from UTC.
# Unfortunately, between 12 and 1 AM Samoa time both of these timezones
# _and_ UTC will all be in the same day, so this test will not detect
# the regression introduced in #18666.
with tm.set_timezone("Pacific/Auckland"): # 12-13 hours ahead of UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
tstoday = pd.Timestamp("today")
tstoday2 = pd.Timestamp.today()
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert abs(pdtoday.value - tstoday.value) < 1e10
assert abs(pdtoday.value - tstoday2.value) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
with tm.set_timezone("US/Samoa"): # 11 hours behind UTC
nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64)
pdtoday = pd.to_datetime("today")
pdtoday2 = pd.to_datetime(["today"])[0]
# These should all be equal with infinite perf; this gives
# a generous margin of 10 seconds
assert abs(pdtoday.normalize().value - nptoday) < 1e10
assert abs(pdtoday2.normalize().value - nptoday) < 1e10
assert pdtoday.tzinfo is None
assert pdtoday2.tzinfo is None
def test_to_datetime_today_now_unicode_bytes(self):
to_datetime(["now"])
to_datetime(["today"])
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s(self, cache):
in_bound_dts = [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]
for dt in in_bound_dts:
assert pd.to_datetime(dt, cache=cache) == Timestamp(dt)
@pytest.mark.parametrize(
"dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")]
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt, errors="raise")
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp(dt)
assert pd.to_datetime(dt, errors="coerce", cache=cache) is NaT
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize("unit", ["s", "D"])
def test_to_datetime_array_of_dt64s(self, cache, unit):
# https://github.com/pandas-dev/pandas/issues/31491
# Need at least 50 to ensure cache is used.
dts = [
np.datetime64("2000-01-01", unit),
np.datetime64("2000-01-02", unit),
] * 30
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_index_equal(
pd.to_datetime(dts, cache=cache),
pd.DatetimeIndex([Timestamp(x).asm8 for x in dts]),
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64("9999-01-01")]
msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dts_with_oob, errors="raise")
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="coerce", cache=cache),
pd.DatetimeIndex(
[Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30
+ [pd.NaT],
),
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_index_equal(
pd.to_datetime(dts_with_oob, errors="ignore", cache=cache),
pd.Index([dt.item() for dt in dts_with_oob]),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz(self, cache):
# xref 8260
# uniform returns a DatetimeIndex
arr = [
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
result = pd.to_datetime(arr, cache=cache)
expected = DatetimeIndex(
["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific"
)
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [
pd.Timestamp("2013-01-01 13:00:00", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00", tz="US/Eastern"),
]
msg = (
"Tz-aware datetime.datetime cannot be "
"converted to datetime64 unless utc=True"
)
with pytest.raises(ValueError, match=msg):
pd.to_datetime(arr, cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_different_offsets(self, cache):
# inspired by asv timeseries.ToDatetimeNONISO8601 benchmark
# see GH-26097 for more
ts_string_1 = "March 1, 2018 12:00:00+0400"
ts_string_2 = "March 1, 2018 12:00:00+0500"
arr = [ts_string_1] * 5 + [ts_string_2] * 5
expected = pd.Index([parse(x) for x in arr])
result = pd.to_datetime(arr, cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_tz_pytz(self, cache):
# see gh-8260
us_eastern = pytz.timezone("US/Eastern")
arr = np.array(
[
us_eastern.localize(
datetime(year=2000, month=1, day=1, hour=3, minute=0)
),
us_eastern.localize(
datetime(year=2000, month=6, day=1, hour=3, minute=0)
),
],
dtype=object,
)
result = pd.to_datetime(arr, utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"init_constructor, end_constructor, test_method",
[
(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_to_datetime_utc_true(
self, cache, init_constructor, end_constructor, test_method
):
# See gh-11934 & gh-6415
data = ["20100102 121314", "20100102 121315"]
expected_data = [
pd.Timestamp("2010-01-02 12:13:14", tz="utc"),
pd.Timestamp("2010-01-02 12:13:15", tz="utc"),
]
result = pd.to_datetime(
init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache
)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = pd.to_datetime(
scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache
)
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_single_value(self, cache):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = pd.to_datetime(pd.Series([ts]), utc=True, cache=cache)
expected = pd.Series([pd.Timestamp(ts, tz="utc")])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):
ts = "2013-01-01 00:00:00-01:00"
expected_ts = "2013-01-01 01:00:00"
data = pd.Series([ts] * 3)
result = pd.to_datetime(data, utc=True, cache=cache)
expected = pd.Series([pd.Timestamp(expected_ts, tz="utc")] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"date, dtype",
[
("2013-01-01 01:00:00", "datetime64[ns]"),
("2013-01-01 01:00:00", "datetime64[ns, UTC]"),
],
)
def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):
expected = pd.Series([pd.Timestamp("2013-01-01 01:00:00", tz="UTC")])
result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True, cache=cache)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
@td.skip_if_no("psycopg2")
def test_to_datetime_tz_psycopg2(self, cache):
# xref 8260
import psycopg2
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array(
[
datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2),
],
dtype=object,
)
result = pd.to_datetime(arr, errors="coerce", utc=True, cache=cache)
expected = DatetimeIndex(
["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"],
dtype="datetime64[ns, UTC]",
freq=None,
)
tm.assert_index_equal(result, expected)
# dtype coercion
i = pd.DatetimeIndex(
["2000-01-01 08:00:00"],
tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None),
)
assert is_datetime64_ns_dtype(i)
# tz coercion
result = pd.to_datetime(i, errors="coerce", cache=cache)
tm.assert_index_equal(result, i)
result = pd.to_datetime(i, errors="coerce", utc=True, cache=cache)
expected = pd.DatetimeIndex(
["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]"
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_datetime_bool(self, cache):
# GH13176
with pytest.raises(TypeError):
to_datetime(False)
assert to_datetime(False, errors="coerce", cache=cache) is NaT
assert to_datetime(False, errors="ignore", cache=cache) is False
with pytest.raises(TypeError):
to_datetime(True)
assert to_datetime(True, errors="coerce", cache=cache) is NaT
assert to_datetime(True, errors="ignore", cache=cache) is True
with pytest.raises(TypeError):
to_datetime([False, datetime.today()], cache=cache)
with pytest.raises(TypeError):
to_datetime(["20130101", True], cache=cache)
tm.assert_index_equal(
to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache),
DatetimeIndex(
[to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)]
),
)
def test_datetime_invalid_datatype(self):
# GH13176
with pytest.raises(TypeError):
pd.to_datetime(bool)
with pytest.raises(TypeError):
pd.to_datetime(pd.to_datetime)
@pytest.mark.parametrize("value", ["a", "00:01:99"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
with pytest.raises(ValueError):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("value", ["3000/12/11 00:00:00"])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_outofbounds_scalar(self, value, format, infer):
# GH24763
res = pd.to_datetime(
value, errors="ignore", format=format, infer_datetime_format=infer
)
assert res == value
res = pd.to_datetime(
value, errors="coerce", format=format, infer_datetime_format=infer
)
assert res is pd.NaT
if format is not None:
with pytest.raises(ValueError):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
else:
with pytest.raises(OutOfBoundsDatetime):
pd.to_datetime(
value, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("values", [["a"], ["00:01:99"], ["a", "b", "99:00:00"]])
@pytest.mark.parametrize("infer", [True, False])
@pytest.mark.parametrize("format", [None, "H%:M%:S%"])
def test_datetime_invalid_index(self, values, format, infer):
# GH24763
res = pd.to_datetime(
values, errors="ignore", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, pd.Index(values))
res = pd.to_datetime(
values, errors="coerce", format=format, infer_datetime_format=infer
)
tm.assert_index_equal(res, pd.DatetimeIndex([pd.NaT] * len(values)))
with pytest.raises(ValueError):
pd.to_datetime(
values, errors="raise", format=format, infer_datetime_format=infer
)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
@pytest.mark.parametrize("constructor", [list, tuple, np.array, pd.Index, deque])
def test_to_datetime_cache(self, utc, format, constructor):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = constructor(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"listlike",
[
(deque([pd.Timestamp("2010-06-02 09:30:00")] * 51)),
([pd.Timestamp("2010-06-02 09:30:00")] * 51),
(tuple([pd.Timestamp("2010-06-02 09:30:00")] * 51)),
],
)
def test_no_slicing_errors_in_should_cache(self, listlike):
# GH 29403
assert tools.should_cache(listlike) is True
def test_to_datetime_from_deque(self):
# GH 29403
result = pd.to_datetime(deque([pd.Timestamp("2010-06-02 09:30:00")] * 51))
expected = pd.to_datetime([pd.Timestamp("2010-06-02 09:30:00")] * 51)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("utc", [True, None])
@pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None])
def test_to_datetime_cache_series(self, utc, format):
date = "20130101 00:00:00"
test_dates = [date] * 10 ** 5
data = pd.Series(test_dates)
result = pd.to_datetime(data, utc=utc, format=format, cache=True)
expected = pd.to_datetime(data, utc=utc, format=format, cache=False)
tm.assert_series_equal(result, expected)
def test_to_datetime_cache_scalar(self):
date = "20130101 00:00:00"
result = pd.to_datetime(date, cache=True)
expected = pd.Timestamp("20130101 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"date, format",
[
("2017-20", "%Y-%W"),
("20 Sunday", "%W %A"),
("20 Sun", "%W %a"),
("2017-21", "%Y-%U"),
("20 Sunday", "%U %A"),
("20 Sun", "%U %a"),
],
)
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(date, format=format)
def test_to_datetime_coerce(self):
# GH 26122
ts_strings = [
"March 1, 2018 12:00:00+0400",
"March 1, 2018 12:00:00+0500",
"20100240",
]
result = to_datetime(ts_strings, errors="coerce")
expected = Index(
[
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)),
datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)),
NaT,
]
)
tm.assert_index_equal(result, expected)
def test_to_datetime_coerce_malformed(self):
# GH 28299
ts_strings = ["200622-12-31", "111111-24-11"]
result = to_datetime(ts_strings, errors="coerce")
expected = Index([NaT, NaT])
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_same_offset(self):
# GH 17697, 11736
ts_str = "2015-11-18 15:30:00+05:30"
result = to_datetime(ts_str)
expected = Timestamp(ts_str)
assert result == expected
expected = DatetimeIndex([Timestamp(ts_str)] * 2)
result = to_datetime([ts_str] * 2)
tm.assert_index_equal(result, expected)
result = DatetimeIndex([ts_str] * 2)
tm.assert_index_equal(result, expected)
def test_iso_8601_strings_with_different_offsets(self):
# GH 17697, 11736
ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT]
result = to_datetime(ts_strings)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)),
NaT,
],
dtype=object,
)
# GH 21864
expected = Index(expected)
tm.assert_index_equal(result, expected)
result = to_datetime(ts_strings, utc=True)
expected = DatetimeIndex(
[Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC"
)
tm.assert_index_equal(result, expected)
def test_iso8601_strings_mixed_offsets_with_naive(self):
# GH 24992
result = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+12:00",
"2018-11-28T00:00:00",
"2018-11-28T00:00:00+06:00",
"2018-11-28T00:00:00",
],
utc=True,
)
expected = pd.to_datetime(
[
"2018-11-28T00:00:00",
"2018-11-27T12:00:00",
"2018-11-28T00:00:00",
"2018-11-27T18:00:00",
"2018-11-28T00:00:00",
],
utc=True,
)
tm.assert_index_equal(result, expected)
items = ["2018-11-28T00:00:00+12:00", "2018-11-28T00:00:00"]
result = pd.to_datetime(items, utc=True)
expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]
tm.assert_index_equal(result, expected)
def test_mixed_offsets_with_native_datetime_raises(self):
# GH 25978
s = pd.Series(
[
"nan",
pd.Timestamp("1990-01-01"),
"2015-03-14T16:15:14.123-08:00",
"2019-03-04T21:56:32.620-07:00",
None,
]
)
with pytest.raises(ValueError, match="Tz-aware datetime.datetime"):
pd.to_datetime(s)
def test_non_iso_strings_with_tz_offset(self):
result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2)
expected = DatetimeIndex(
[datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"ts, expected",
[
(Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")),
(
Timestamp("2018-01-01", tz="US/Pacific"),
Timestamp("2018-01-01 08:00", tz="UTC"),
),
],
)
def test_timestamp_utc_true(self, ts, expected):
# GH 24415
result = to_datetime(ts, utc=True)
assert result == expected
@pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"])
def test_to_datetime_with_format_out_of_bounds(self, dt_str):
# GH 9107
with pytest.raises(OutOfBoundsDatetime):
pd.to_datetime(dt_str, format="%Y%m%d")
def test_to_datetime_utc(self):
arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off
dates = [
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
result = to_datetime(dates)
assert result.tz == fixed_off
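# Conversions from numeric epochs via the unit= keyword and assembling
# datetimes from DataFrame columns.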
class TestToDatetimeUnit:
@pytest.mark.parametrize("cache", [True, False])
def test_unit(self, cache):
# GH 11758
# test proper behavior with errors
with pytest.raises(ValueError):
to_datetime([1], unit="D", format="%Y%m%d", cache=cache)
values = [11111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""]
result = to_datetime(values, unit="D", errors="ignore", cache=cache)
expected = Index(
[
11111111,
Timestamp("1970-01-02"),
Timestamp("1970-01-02"),
NaT,
NaT,
NaT,
NaT,
NaT,
],
dtype=object,
)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit="D", errors="coerce", cache=cache)
expected = DatetimeIndex(
["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"]
)
tm.assert_index_equal(result, expected)
with pytest.raises(tslib.OutOfBoundsDatetime):
to_datetime(values, unit="D", errors="raise", cache=cache)
values = [1420043460000, iNaT, NaT, np.nan, "NaT"]
result = to_datetime(values, errors="ignore", unit="s", cache=cache)
expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors="coerce", unit="s", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"])
tm.assert_index_equal(result, expected)
with pytest.raises(tslib.OutOfBoundsDatetime):
to_datetime(values, errors="raise", unit="s", cache=cache)
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ["foo", Timestamp("20130101")]:
try:
to_datetime(val, errors="raise", unit="s", cache=cache)
except tslib.OutOfBoundsDatetime as err:
raise AssertionError("incorrect exception raised") from err
except ValueError:
pass
@pytest.mark.parametrize("cache", [True, False])
def test_unit_consistency(self, cache):
# consistency of conversions
expected = Timestamp("1970-05-09 14:25:11")
result = pd.to_datetime(11111111, unit="s", errors="raise", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="coerce", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit="s", errors="ignore", cache=cache)
assert result == expected
assert isinstance(result, Timestamp)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_with_numeric(self, cache):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(["2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr1 = [1.434692e18, 1.432766e18]
arr2 = np.array(arr1).astype("int64")
for errors in ["ignore", "raise", "coerce"]:
result = pd.to_datetime(arr1, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
result = pd.to_datetime(arr2, errors=errors, cache=cache)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
# if we have ints/strings
expected = DatetimeIndex(["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"])
arr = ["foo", 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(
["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"]
)
arr = [1.434692e18, 1.432766e18, "foo", "NaT"]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_mixed(self, cache):
# mixed integers/datetimes
expected = DatetimeIndex(["2013-01-01", "NaT", "NaT"])
arr = [pd.Timestamp("20130101"), 1.434692e18, 1.432766e18]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(arr, errors="raise", cache=cache)
expected = DatetimeIndex(["NaT", "NaT", "2013-01-01"])
arr = [1.434692e18, 1.432766e18, pd.Timestamp("20130101")]
result = pd.to_datetime(arr, errors="coerce", cache=cache)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(arr, errors="raise", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_unit_rounding(self, cache):
# GH 14156 & GH 20445: argument will incur floating point errors
# but no premature rounding
result = pd.to_datetime(1434743731.8770001, unit="s", cache=cache)
expected = pd.Timestamp("2015-06-19 19:55:31.877000192")
assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_unit_ignore_keeps_name(self, cache):
# GH 21697
expected = pd.Index([15e9] * 2, name="name")
result = pd.to_datetime(expected, errors="ignore", unit="s", cache=cache)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe(self, cache):
df = DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [58, 59],
"second": [10, 11],
"ms": [1, 1],
"us": [2, 2],
"ns": [3, 3],
}
)
result = to_datetime(
{"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache
)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")]
)
tm.assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache)
tm.assert_series_equal(result, expected)
# dict but with constructable
df2 = df[["year", "month", "day"]].to_dict()
df2["month"] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")]
)
tm.assert_series_equal(result, expected2)
# unit mappings
units = [
{
"year": "years",
"month": "months",
"day": "days",
"hour": "hours",
"minute": "minutes",
"second": "seconds",
},
{
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d), cache=cache)
expected = Series(
[Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")]
)
tm.assert_series_equal(result, expected)
d = {
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
"ms": "ms",
"us": "us",
"ns": "ns",
}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
tm.assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
msg = (
"cannot assemble the datetimes: time data .+ does not "
r"match format '%Y%m%d' \(match\)"
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors="coerce", cache=cache)
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
with pytest.raises(ValueError, match=msg):
df2 = df.copy()
df2["foo"] = 1
to_datetime(df2, cache=cache)
# not enough
msg = (
r"to assemble mappings requires at least that \[year, month, "
r"day\] be specified: \[.+\] is missing"
)
for c in [
["year"],
["year", "month"],
["year", "month", "second"],
["month", "day"],
["year", "day", "second"],
]:
with pytest.raises(ValueError, match=msg):
to_datetime(df[c], cache=cache)
# duplicates
msg = "cannot assemble with duplicate keys"
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
df2.columns = ["year", "year", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame(
{"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]}
)
df2.columns = ["year", "month", "day", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
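    # test_dataframe_dtypes: datetime assembly works for int16/int8 columns
    # and raises for non-integral float components.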
@pytest.mark.parametrize("cache", [True, False])
def test_dataframe_dtypes(self, cache):
# #13451
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
# int16
result = to_datetime(df.astype("int16"), cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# mixed dtypes
df["month"] = df["month"].astype("int8")
df["day"] = df["day"].astype("int8")
result = to_datetime(df, cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
with pytest.raises(ValueError):
to_datetime(df, cache=cache)
def test_dataframe_utc_true(self):
# GH 23760
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = pd.to_datetime(df, utc=True)
expected = pd.Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_to_datetime_errors_ignore_utc_true(self):
# GH 23758
result = pd.to_datetime([1], unit="s", utc=True, errors="ignore")
expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC")
tm.assert_index_equal(result, expected)
# TODO: this is moved from tests.series.test_timeseries, may be redundant
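    # test_to_datetime_unit: epoch seconds/days (int and float, with NaT/NaN
    # sprinkled in) convert to the expected Timestamps, and bad values either
    # raise or coerce to NaT.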
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)
result = to_datetime(s, unit="s")
expected = Series(
[
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t)
for t in np.arange(0, 2, 0.25)
]
+ [NaT]
)
# GH20455 argument will incur floating point errors but no premature rounding
result = result.round("ms")
tm.assert_series_equal(result, expected)
s = pd.concat(
[Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])],
ignore_index=True,
)
result = to_datetime(s, unit="s")
expected = Series(
[Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)]
+ [NaT]
)
tm.assert_series_equal(result, expected)
result = to_datetime([1, 2, "NaT", pd.NaT, np.nan], unit="D")
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3
)
tm.assert_index_equal(result, expected)
msg = "non convertible value foo with the unit 'D'"
with pytest.raises(ValueError, match=msg):
to_datetime([1, 2, "foo"], unit="D")
msg = "cannot convert input 111111111 with the unit 'D'"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime([1, 2, 111111111], unit="D")
        # with errors="coerce" we can still process the convertible values
expected = DatetimeIndex(
[Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1
)
result = to_datetime([1, 2, "foo"], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit="D", errors="coerce")
tm.assert_index_equal(result, expected)
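# Assorted to_datetime behaviours: bounds checking, ISO 8601 parsing, locale
# handling, NaT/None conversion, and dayfirst handling.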
class TestToDatetimeMisc:
def test_to_datetime_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
with pytest.raises(OutOfBoundsDatetime):
to_datetime(arr)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601(self, cache):
result = to_datetime(["2012-01-01 00:00:00"], cache=cache)
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(["20121001"], cache=cache) # bad iso 8601
exp = Timestamp("2012-10-01")
assert result[0] == exp
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_default(self, cache):
rs = to_datetime("2001", cache=cache)
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_on_datetime64_series(self, cache):
# #2699
s = Series(date_range("1/1/2000", periods=10))
result = to_datetime(s, cache=cache)
assert result[0] == s[0]
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_space_in_series(self, cache):
# GH 6428
s = Series(["10/18/2006", "10/18/2008", " "])
msg = r"(\(')?String does not contain a date(:', ' '\))?"
with pytest.raises(ValueError, match=msg):
to_datetime(s, errors="raise", cache=cache)
result_coerce = to_datetime(s, errors="coerce", cache=cache)
expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors="ignore", cache=cache)
tm.assert_series_equal(result_ignore, s)
@td.skip_if_has_locale
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_with_apply(self, cache):
# this is only locale tested with US/None locales
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3])
expected = pd.to_datetime(td, format="%b %y", cache=cache)
result = td.apply(pd.to_datetime, format="%b %y", cache=cache)
tm.assert_series_equal(result, expected)
td = pd.Series(["May 04", "Jun 02", ""], index=[1, 2, 3])
msg = r"time data '' does not match format '%b %y' \(match\)"
with pytest.raises(ValueError, match=msg):
pd.to_datetime(td, format="%b %y", errors="raise", cache=cache)
with pytest.raises(ValueError, match=msg):
td.apply(pd.to_datetime, format="%b %y", errors="raise", cache=cache)
expected = pd.to_datetime(td, format="%b %y", errors="coerce", cache=cache)
result = td.apply(
lambda x: pd.to_datetime(x, format="%b %y", errors="coerce", cache=cache)
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_types(self, cache):
# empty string
result = to_datetime("", cache=cache)
assert result is NaT
result = to_datetime(["", ""], cache=cache)
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0, cache=cache)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(["2012"], cache=cache)[0]
result = to_datetime("2012", cache=cache)
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ["20120101", "20120101 12:01:01"]
expected = list(to_datetime(array, cache=cache))
result = [Timestamp(date_str) for date_str in array]
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_unprocessable_input(self, cache):
# GH 4928
# GH 21864
result = to_datetime([1, "1"], errors="ignore", cache=cache)
expected = Index(np.array([1, "1"], dtype="O"))
tm.assert_equal(result, expected)
msg = "invalid string coercion to datetime"
with pytest.raises(TypeError, match=msg):
to_datetime([1, "1"], errors="raise", cache=cache)
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view("M8[us]")
as_obj = scalar.astype("O")
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype("O")
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range("1/1/2000", periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
with pytest.raises(OverflowError):
date_range(start="1/1/1700", freq="B", periods=100000)
@pytest.mark.parametrize("cache", [True, False])
def test_string_na_nat_conversion(self, cache):
# GH #999, #858
strings = np.array(
["1/1/2000", "1/2/2000", np.nan, "1/4/2000, 12:34:56"], dtype=object
)
expected = np.empty(4, dtype="M8[ns]")
for i, val in enumerate(strings):
if isna(val):
expected[i] = iNaT
else:
expected[i] = parse(val)
result = tslib.array_to_datetime(strings)[0]
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings, cache=cache)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(["1/100/2000", np.nan], dtype=object)
# GH 10636, default is now 'raise'
msg = r"Unknown string format:|day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
result = to_datetime(malformed, errors="ignore", cache=cache)
# GH 21864
expected = Index(malformed)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError, match=msg):
to_datetime(malformed, errors="raise", cache=cache)
idx = ["a", "b", "c", "d", "e"]
series = Series(
["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo"
)
dseries = Series(
[
to_datetime("1/1/2000", cache=cache),
np.nan,
to_datetime("1/3/2000", cache=cache),
np.nan,
to_datetime("1/5/2000", cache=cache),
],
index=idx,
name="foo",
)
result = to_datetime(series, cache=cache)
dresult = to_datetime(dseries, cache=cache)
expected = Series(np.empty(5, dtype="M8[ns]"), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = pd.NaT
else:
expected[i] = to_datetime(x, cache=cache)
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "foo"
tm.assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == "foo"
@pytest.mark.parametrize(
"dtype",
[
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
@pytest.mark.parametrize("cache", [True, False])
def test_dti_constructor_numpy_timeunits(self, cache, dtype):
# GH 9114
base = pd.to_datetime(
["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache
)
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values, cache=cache), base)
@pytest.mark.parametrize("cache", [True, False])
def test_dayfirst(self, cache):
# GH 5917
arr = ["10/02/2014", "11/02/2014", "12/02/2014"]
expected = DatetimeIndex(
[datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]
)
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True, cache=cache)
idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
@pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray])
def test_to_datetime_dta_tz(self, klass):
# GH#27733
dti = date_range("2015-04-05", periods=3).rename("foo")
expected = dti.tz_localize("UTC")
obj = klass(dti)
expected = klass(expected)
result = to_datetime(obj, utc=True)
tm.assert_equal(result, expected)
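# _guess_datetime_format_for_array should infer the format from the first
# non-null string and return None when no parseable string is present.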
class TestGuessDatetimeFormat:
@td.skip_if_not_us_locale
def test_guess_datetime_format_for_array(self):
expected_format = "%Y-%m-%d %H:%M:%S.%f"
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype="O"),
np.array([np.nan, np.nan, dt_string], dtype="O"),
np.array([dt_string, "random_string"], dtype="O"),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype="O")
)
assert format_for_string_of_nans is None
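# infer_datetime_format=True should produce the same results as the default
# parser, whether the strings are consistent, inconsistent, contain NaNs, or
# carry a timezone name.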
class TestToDatetimeInferFormat:
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_consistent_format(self, cache):
s = pd.Series(pd.date_range("20000101", periods=50, freq="H"))
test_formats = ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"]
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = pd.to_datetime(
s_as_dt_strings, format=test_format, cache=cache
)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False, cache=cache
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True, cache=cache
)
# Whether the format is explicitly passed, it is inferred, or
# it is not inferred, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):
s = pd.Series(
np.array(
["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"]
)
)
# When the format is inconsistent, infer_datetime_format should just
        # fall back to the default parsing
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
s = pd.Series(np.array(["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"]))
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):
s = pd.Series(
np.array(["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan])
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):
s = pd.Series(
np.array(
[
np.nan,
np.nan,
"01/01/2011 00:00:00",
"01/02/2011 00:00:00",
"01/03/2011 00:00:00",
]
)
)
tm.assert_series_equal(
pd.to_datetime(s, infer_datetime_format=False, cache=cache),
pd.to_datetime(s, infer_datetime_format=True, cache=cache),
)
@pytest.mark.parametrize(
"tz_name, offset", [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)]
)
def test_infer_datetime_format_tz_name(self, tz_name, offset):
# GH 33133
s = pd.Series([f"2019-02-02 08:07:13 {tz_name}"])
result = to_datetime(s, infer_datetime_format=True)
expected = pd.Series(
[pd.Timestamp("2019-02-02 08:07:13").tz_localize(pytz.FixedOffset(offset))]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_iso8601_noleading_0s(self, cache):
# GH 11871
s = pd.Series(["2014-1-1", "2014-2-2", "2015-3-3"])
expected = pd.Series(
[
pd.Timestamp("2014-01-01"),
pd.Timestamp("2014-02-02"),
pd.Timestamp("2015-03-03"),
]
)
tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected)
tm.assert_series_equal(
pd.to_datetime(s, format="%Y-%m-%d", cache=cache), expected
)
class TestDaysInMonth:
# tests for issue #10154
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_coerce(self, cache):
assert isna(to_datetime("2015-02-29", errors="coerce", cache=cache))
assert isna(
to_datetime("2015-02-29", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-02-32", format="%Y-%m-%d", errors="coerce", cache=cache)
)
assert isna(
to_datetime("2015-04-31", format="%Y-%m-%d", errors="coerce", cache=cache)
)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_raise(self, cache):
msg = "day is out of range for month"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", cache=cache)
msg = "time data 2015-02-29 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-29", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-02-32 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-02-32", errors="raise", format="%Y-%m-%d", cache=cache)
msg = "time data 2015-04-31 doesn't match format specified"
with pytest.raises(ValueError, match=msg):
to_datetime("2015-04-31", errors="raise", format="%Y-%m-%d", cache=cache)
@pytest.mark.parametrize("cache", [True, False])
def test_day_not_in_month_ignore(self, cache):
assert to_datetime("2015-02-29", errors="ignore", cache=cache) == "2015-02-29"
assert (
to_datetime("2015-02-29", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-29"
)
assert (
to_datetime("2015-02-32", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-02-32"
)
assert (
to_datetime("2015-04-31", errors="ignore", format="%Y-%m-%d", cache=cache)
== "2015-04-31"
)
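# parse_time_string, to_datetime, Timestamp and DatetimeIndex should agree on
# a wide range of date-string formats, NaT handling, dayfirst/yearfirst
# combinations, bare time strings, and fixed-offset timezones.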
class TestDatetimeParsingWrappers:
@pytest.mark.parametrize(
"date_str,expected",
list(
{
"2011-01-01": datetime(2011, 1, 1),
"2Q2005": datetime(2005, 4, 1),
"2Q05": datetime(2005, 4, 1),
"2005Q1": datetime(2005, 1, 1),
"05Q1": datetime(2005, 1, 1),
"2011Q3": datetime(2011, 7, 1),
"11Q3": datetime(2011, 7, 1),
"3Q2011": datetime(2011, 7, 1),
"3Q11": datetime(2011, 7, 1),
# quarterly without space
"2000Q4": datetime(2000, 10, 1),
"00Q4": datetime(2000, 10, 1),
"4Q2000": datetime(2000, 10, 1),
"4Q00": datetime(2000, 10, 1),
"2000q4": datetime(2000, 10, 1),
"2000-Q4": datetime(2000, 10, 1),
"00-Q4": datetime(2000, 10, 1),
"4Q-2000": datetime(2000, 10, 1),
"4Q-00": datetime(2000, 10, 1),
"00q4": datetime(2000, 10, 1),
"2005": datetime(2005, 1, 1),
"2005-11": datetime(2005, 11, 1),
"2005 11": datetime(2005, 11, 1),
"11-2005": datetime(2005, 11, 1),
"11 2005": datetime(2005, 11, 1),
"200511": datetime(2020, 5, 11),
"20051109": datetime(2005, 11, 9),
"20051109 10:15": datetime(2005, 11, 9, 10, 15),
"20051109 08H": datetime(2005, 11, 9, 8, 0),
"2005-11-09 10:15": datetime(2005, 11, 9, 10, 15),
"2005-11-09 08H": datetime(2005, 11, 9, 8, 0),
"2005/11/09 10:15": datetime(2005, 11, 9, 10, 15),
"2005/11/09 08H": datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10, 36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
                # GH 10537
"2014-06": datetime(2014, 6, 1),
"06-2014": datetime(2014, 6, 1),
"2014-6": datetime(2014, 6, 1),
"6-2014": datetime(2014, 6, 1),
"20010101 12": datetime(2001, 1, 1, 12),
"20010101 1234": datetime(2001, 1, 1, 12, 34),
"20010101 123456": datetime(2001, 1, 1, 12, 34, 56),
}.items()
),
)
@pytest.mark.parametrize("cache", [True, False])
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(
np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache
)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
        # these really need yearfirst handling, but we don't support it
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst)
assert result7 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_na_values_with_cache(
self, cache, unique_nulls_fixture, unique_nulls_fixture2
):
# GH22305
expected = Index([NaT, NaT], dtype="datetime64[ns]")
result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)
tm.assert_index_equal(result, expected)
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _ = parsing.parse_time_string("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_dayfirst_yearfirst(self, cache):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# str : dayfirst, yearfirst, expected
cases = {
"10-11-12": [
(False, False, datetime(2012, 10, 11)),
(True, False, datetime(2012, 11, 10)),
(False, True, datetime(2010, 11, 12)),
(True, True, datetime(2010, 12, 11)),
],
"20/12/21": [
(False, False, datetime(2021, 12, 20)),
(True, False, datetime(2021, 12, 20)),
(False, True, datetime(2020, 12, 21)),
(True, True, datetime(2020, 12, 21)),
],
}
for date_str, values in cases.items():
for dayfirst, yearfirst, expected in values:
# compare with dateutil result
dateutil_result = parse(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
assert dateutil_result == expected
result1, _ = parsing.parse_time_string(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(
date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache
)
result4 = DatetimeIndex(
[date_str], dayfirst=dayfirst, yearfirst=yearfirst
)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize("cache", [True, False])
def test_parsers_timestring(self, cache):
# must be the same as dateutil result
cases = {
"10:15": (parse("10:15"), datetime(1, 1, 1, 10, 15)),
"9:05": (parse("9:05"), datetime(1, 1, 1, 9, 5)),
}
for date_str, (exp_now, exp_def) in cases.items():
result1, _ = parsing.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string returns a datetime based on the default date
            # (year 1); the others use the current date, and this cannot be
            # changed because the behaviour is relied on by time series plotting
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@pytest.mark.parametrize("cache", [True, False])
@pytest.mark.parametrize(
"dt_string, tz, dt_string_repr",
[
(
"2013-01-01 05:45+0545",
pytz.FixedOffset(345),
"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')",
),
(
"2013-01-01 05:30+0530",
pytz.FixedOffset(330),
"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')",
),
],
)
def test_parsers_timezone_minute_offsets_roundtrip(
self, cache, dt_string, tz, dt_string_repr
):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize("UTC").tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=["D", "s", "ms", "us", "ns"])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def epoch_1960():
"""Timestamp at 1960-01-01."""
return Timestamp("1960-01-01")
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"])
def epochs(epoch_1960, request):
"""Timestamp at 1960-01-01 in various forms.
* pd.Timestamp
* datetime.datetime
* numpy.datetime64
* str
"""
assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"}
if request.param == "timestamp":
return epoch_1960
elif request.param == "pydatetime":
return epoch_1960.to_pydatetime()
elif request.param == "datetime64":
return epoch_1960.to_datetime64()
else:
return str(epoch_1960)
@pytest.fixture
def julian_dates():
return pd.date_range("2014-1-1", periods=10).to_julian_date().values
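# to_datetime with the `origin` argument: julian dates, the unix epoch,
# user-supplied epochs in various forms, and the corresponding error cases.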
class TestOrigin:
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(pd.to_datetime(julian_dates, unit="D", origin="julian"))
expected = Series(
pd.to_datetime(julian_dates - pd.Timestamp(0).to_julian_date(), unit="D")
)
tm.assert_series_equal(result, expected)
result = Series(pd.to_datetime([0, 1, 2], unit="D", origin="unix"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
# default
result = Series(pd.to_datetime([0, 1, 2], unit="D"))
expected = Series(
[Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")]
)
tm.assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = pd.to_datetime(2456658, origin="julian", unit="D")
assert result.to_julian_date() == 2456658
# out-of-bounds
with pytest.raises(ValueError):
pd.to_datetime(1, origin="julian", unit="D")
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != "D":
with pytest.raises(ValueError):
pd.to_datetime(julian_dates, unit=units, origin="julian")
def test_invalid_origin(self):
# need to have a numeric specified
with pytest.raises(ValueError):
pd.to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError):
pd.to_datetime("2005-01-01", origin="1960-01-01", unit="D")
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs]
)
result = Series(pd.to_datetime(units_from_epochs, unit=units, origin=epochs))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"origin, exc",
[
("random_string", ValueError),
("epoch", ValueError),
("13-24-1990", ValueError),
(datetime(1, 1, 1), tslib.OutOfBoundsDatetime),
],
)
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
with pytest.raises(exc):
pd.to_datetime(units_from_epochs, unit=units, origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError):
pd.to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
@pytest.mark.parametrize("format", [None, "%Y-%m-%d %H:%M:%S"])
def test_to_datetime_out_of_bounds_with_format_arg(self, format):
# see gh-23830
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
to_datetime("2417-10-27 00:00:00", format=format)
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = pd.to_datetime(200 * 365, unit="D")
expected = Timestamp("2169-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(200 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2069-11-13 00:00:00")
assert result == expected
result = pd.to_datetime(300 * 365, unit="D", origin="1870-01-01")
expected = Timestamp("2169-10-20 00:00:00")
assert result == expected
@pytest.mark.parametrize(
"offset,utc,exp",
[
["Z", True, "2019-01-01T00:00:00.000Z"],
["Z", None, "2019-01-01T00:00:00.000Z"],
["-01:00", True, "2019-01-01T01:00:00.000Z"],
["-01:00", None, "2019-01-01T00:00:00.000-01:00"],
],
)
def test_arg_tz_ns_unit(self, offset, utc, exp):
# GH 25546
arg = "2019-01-01T00:00:00.000" + offset
result = to_datetime([arg], unit="ns", utc=utc)
expected = to_datetime([exp])
tm.assert_index_equal(result, expected)
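# should_cache decides whether to_datetime caches conversions based on the
# share of unique values in a sample of the input.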
@pytest.mark.parametrize(
"listlike,do_caching",
[([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)],
)
def test_should_cache(listlike, do_caching):
assert (
tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)
== do_caching
)
@pytest.mark.parametrize(
"unique_share,check_count, err_message",
[
(0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"),
(10, 2, r"unique_share must be in next bounds: \(0; 1\)"),
],
)
def test_should_cache_errors(unique_share, check_count, err_message):
arg = [5] * 10
with pytest.raises(AssertionError, match=err_message):
tools.should_cache(arg, unique_share, check_count)
def test_nullable_integer_to_datetime():
# Test for #30050
ser = pd.Series([1, 2, None, 2 ** 61, None])
ser = ser.astype("Int64")
ser_copy = ser.copy()
res = pd.to_datetime(ser, unit="ns")
expected = pd.Series(
[
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
np.datetime64("NaT"),
np.datetime64("2043-01-25 23:56:49.213693952"),
np.datetime64("NaT"),
]
)
tm.assert_series_equal(res, expected)
# Check that ser isn't mutated
tm.assert_series_equal(ser, ser_copy)
@pytest.mark.parametrize("klass", [np.array, list])
def test_na_to_datetime(nulls_fixture, klass):
result = pd.to_datetime(klass([nulls_fixture]))
assert result[0] is pd.NaT
| bsd-3-clause |
TobyRoseman/SFrame | oss_src/unity/python/sframe/test/test_sframe.py | 5 | 134915 | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
from ..data_structures.sframe import SFrame
from ..data_structures.sarray import SArray
from ..data_structures.image import Image
from ..connect import main as glconnect
from ..util import _assert_sframe_equal, generate_random_sframe
from .. import _launch, load_sframe, aggregate
from . import util
import pandas as pd
from ..util.timezone import GMT
from pandas.util.testing import assert_frame_equal
import unittest
import datetime as dt
import tempfile
import os
import csv
import gzip
import string
import time
import numpy as np
import array
import math
import random
import shutil
import functools
import sys
import mock
import sqlite3
from .dbapi2_mock import dbapi2_mock
HAS_PYSPARK = True
try:
from pyspark import SparkContext, SQLContext
except:
HAS_PYSPARK = False
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
# Taken from http://stackoverflow.com/questions/1151658/python-hashable-dicts
# by Alex Martelli
class hashabledict(dict):
def __key(self):
return tuple((k,self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
class SFrameTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.a_to_z = [str(chr(97 + i)) for i in range(0, 26)]
self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
self.int_data2 = range(50,60)
self.float_data2 = [1.0 * i for i in range(50,60)]
self.string_data2 = [str(i) for i in range(50,60)]
self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0))]
self.all_type_cols = [self.int_data,
self.float_data,
self.string_data,
self.vec_data,
self.list_data,
self.dict_data,
self.datetime_data*5]
self.sf_all_types = SFrame({"X"+str(i[0]):i[1] for i in zip(range(1,8),
self.all_type_cols)})
# Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.
self.employees_sf = SFrame()
self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name')
self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id')
# XXX: below are only used by one test!
self.departments_sf = SFrame()
self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id')
self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name')
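    # helper: element-wise SArray comparison that recurses into dicts and
    # iterables and treats NaN values on both sides as equal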
def __assert_sarray_equal(self, sa1, sa2):
l1 = list(sa1)
l2 = list(sa2)
self.assertEquals(len(l1), len(l2))
for i in range(len(l1)):
v1 = l1[i]
v2 = l2[i]
if v1 == None:
self.assertEqual(v2, None)
else:
if type(v1) == dict:
self.assertEquals(len(v1), len(v2))
for key in v1:
self.assertTrue(key in v1)
self.assertEqual(v1[key], v2[key])
elif (hasattr(v1, "__iter__")):
self.assertEquals(len(v1), len(v2))
for j in range(len(v1)):
t1 = v1[j]; t2 = v2[j]
if (type(t1) == float):
if (math.isnan(t1)):
self.assertTrue(math.isnan(t2))
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(v1, v2)
def test_split_datetime(self):
from_zone = GMT(0)
to_zone = GMT(4.5)
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
central = utc.astimezone(to_zone)
sa = SArray([utc,central])
expected = SFrame()
        expected['X.year'] = [2011,2011]
        expected['X.month'] = [1,1]
        expected['X.day'] = [21,21]
        expected['X.hour'] = [2,7]
        expected['X.minute'] = [37,7]
        expected['X.second'] = [21,21]
        expected['X.tzone'] = [0.0,4.5]
result = sa.split_datetime(tzone=True)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column names
expected = SFrame()
        expected['ttt.year'] = [2011,2011]
        expected['ttt.minute'] = [37,7]
        expected['ttt.second'] = [21,21]
        result = sa.split_datetime(column_name_prefix='ttt', limit=['year','minute','second'])
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sf = SFrame({'datetime': sa})
        result = sf.split_datetime('datetime', column_name_prefix='ttt', limit=['year','minute','second'])
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
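    # helper: compare an SFrame against a pandas DataFrame by shape and content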
def __test_equal(self, sf, df):
self.assertEquals(sf.num_rows(), df.shape[0])
self.assertEquals(sf.num_cols(), df.shape[1])
assert_frame_equal(sf.to_dataframe(), df)
def __create_test_df(self, size):
int_data = []
float_data = []
string_data = []
for i in range(0,size):
int_data.append(i)
float_data.append(float(i))
string_data.append(str(i))
return pd.DataFrame({'int_data': int_data,
'float_data': float_data,
'string_data': string_data})
# Test if the rows are all the same...row order does not matter.
# (I do expect column order to be the same)
def __assert_join_results_equal(self, sf, expected_sf):
_assert_sframe_equal(sf, expected_sf, check_row_order=False)
def test_creation_from_dataframe(self):
# created from empty dataframe
sf_empty = SFrame(data=pd.DataFrame())
self.__test_equal(sf_empty, pd.DataFrame())
sf = SFrame(data=self.dataframe, format='dataframe')
self.__test_equal(sf, self.dataframe)
sf = SFrame(data=self.dataframe, format='auto')
self.__test_equal(sf, self.dataframe)
original_p = pd.DataFrame({'a':[1.0, float('nan')]})
effective_p = pd.DataFrame({'a':[1.0, None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
original_p = pd.DataFrame({'a':['a',None,'b']})
sf = SFrame(data=original_p)
self.__test_equal(sf, original_p)
def test_auto_parse_csv(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:
df = pd.DataFrame({'float_data': self.float_data,
'int_data': self.int_data,
'string_data': self.a_to_z[:len(self.int_data)]})
df.to_csv(csvfile, index=False)
csvfile.close()
sf = SFrame.read_csv(csvfile.name, header=True)
self.assertEqual(sf.dtype(), [float, int, str])
self.__test_equal(sf, df)
def test_parse_csv(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:
self.dataframe.to_csv(csvfile, index=False)
csvfile.close()
# list type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints=[int, int, str])
self.assertEqual(sf.dtype(), [int, int, str])
sf['int_data'] = sf['int_data'].astype(int)
sf['float_data'] = sf['float_data'].astype(float)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
# list type hints, incorrect number of columns
self.assertRaises(RuntimeError,
lambda: SFrame.read_csv(csvfile.name,
column_type_hints=[int, float]))
# dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'int_data': int,
'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# partial dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# single value type hints
sf = SFrame.read_csv(csvfile.name, column_type_hints=str)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.__test_equal(sf, all_string_column_df)
# single value type hints row limit
sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.assertEqual(len(sf), 5)
self.__test_equal(sf, all_string_column_df[0:len(sf)])
sf = SFrame.read_csv(csvfile.name)
sf2 = SFrame(csvfile.name, format='csv')
self.__test_equal(sf2, sf.to_dataframe())
f = open(csvfile.name, "w")
f.write('a,b,c\n')
f.write('NA,PIKA,CHU\n')
f.write('1.0,2,3\n')
f.close()
sf = SFrame.read_csv(csvfile.name,
na_values=['NA','PIKA','CHU'],
column_type_hints={'a':float,'b':int,'c':str})
t = list(sf['a'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 1.0)
t = list(sf['b'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 2)
t = list(sf['c'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], "3")
def test_save_load_file_cleanup(self):
# when some file is in use, file should not be deleted
with util.TempDirectory() as f:
sf = SFrame()
sf['a'] = SArray(range(1,1000000))
sf.save(f)
            # multiple files for each SArray, plus 1 sframe_idx, 1 object.bin, 1 ini
            file_count = len(os.listdir(f))
            self.assertTrue(file_count > 3)
            # sf1 now references the on-disk file
            sf1 = SFrame(f)
# create another SFrame and save to the same location
sf2 = SFrame()
sf2['b'] = SArray([str(i) for i in range(1,100000)])
sf2['c'] = SArray(range(1, 100000))
sf2.save(f)
file_count = len(os.listdir(f))
            self.assertTrue(file_count > 3)
# now sf1 should still be accessible
self.__test_equal(sf1, sf.to_dataframe())
# and sf2 is correct too
sf3 = SFrame(f)
self.__test_equal(sf3, sf2.to_dataframe())
# when sf1 goes out of scope, the tmp files should be gone
sf1 = 1
            time.sleep(1)  # give the files time to be deleted
            file_count = len(os.listdir(f))
            self.assertTrue(file_count > 3)
def test_save_load(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f)
sf2 = load_sframe(f)
self.__test_equal(sf2, self.dataframe)
# Check individual formats with the SFrame constructor
formats = ['.csv']
for suffix in formats:
f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name)
sf2 = SFrame(f.name)
sf2['int_data'] = sf2['int_data'].astype(int)
sf2['float_data'] = sf2['float_data'].astype(float)
sf2['string_data'] = sf2['string_data'].astype(str)
self.__test_equal(sf2, self.dataframe)
g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])
g2=SFrame()
g2['x']=g
g2.save(f.name)
g3=SFrame.read_csv(f.name,column_type_hints=list)
self.__test_equal(g2, g3.to_dataframe())
f.close()
os.unlink(f.name)
        # Make sure this file doesn't exist before testing
self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))
if sys.platform != 'win32':
# Bad permission
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sf.save(os.path.join(test_dir, 'bad.frame_idx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sf2 = SFrame(os.path.join(test_dir, 'bad.frame_idx'))
# cleanup
os.removedirs(test_dir)
del sf2
def test_save_load_reference(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
originallen = len(sf)
sf.save(f)
del sf
sf = SFrame(f)
            # add a new column (int_data + 1) and save it back by reference
int_data2 = sf['int_data'] + 1
int_data2.__materialize__()
sf['int_data2'] = int_data2
sf._save_reference(f)
del sf
sf = SFrame(f)
self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())
# try to append and save reference
expected = sf.to_dataframe()
sf = sf.append(sf)
sf._save_reference(f)
sf = SFrame(f)
self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())
self.assertEquals(2 * originallen, len(sf))
assert_frame_equal(sf[originallen:].to_dataframe(), expected)
assert_frame_equal(sf[:originallen].to_dataframe(), expected)
def test_save_to_csv(self):
f = tempfile.NamedTemporaryFile(suffix='.csv', delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='csv')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False)
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
import csv
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_MINIMAL)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_ALL)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_NONE)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
# Pandas compatibility options
sf.export_csv(f.name, sep=':', lineterminator='\r\n', doublequote=False, quotechar='\'', quote_level=csv.QUOTE_NONE)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, sep=':', lineterminator='\r\n', doublequote=False, quotechar='\'')
self.__test_equal(sf2, self.dataframe)
f.close()
os.unlink(f.name)
def test_save_to_json(self):
f = tempfile.NamedTemporaryFile(suffix='.json', delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='json')
sf2 = SFrame.read_json(f.name)
# the float column will be parsed as integer
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.export_json(f.name)
sf2 = SFrame.read_json(f.name)
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.export_json(f.name, orient='lines')
sf2 = SFrame.read_json(f.name, orient='lines')
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
f.close()
os.unlink(f.name)
def _remove_sframe_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
os.remove(f)
def test_creation_from_txt(self):
f = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
df = self.dataframe[['string_data']]
df.to_csv(f.name, index=False)
sf = SFrame(f.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
fgzip = tempfile.NamedTemporaryFile(suffix='.txt.gz', delete=False)
f_in = open(f.name, 'rb')
f_out = gzip.open(fgzip.name, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
sf = SFrame(fgzip.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
fgzip.close()
os.unlink(fgzip.name)
f.close()
os.unlink(f.name)
def test_creation_from_csv_on_local(self):
if os.path.exists('./foo.csv'):
os.remove('./foo.csv')
with open('./foo.csv', 'w') as f:
url = f.name
basesf = SFrame(self.dataframe)
basesf.save(url, format="csv")
f.close()
sf = SFrame('./foo.csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
sf = SFrame(url)
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
os.remove(url)
def test_alternate_line_endings(self):
# test Windows line endings
if os.path.exists('./windows_lines.csv'):
os.remove('./windows_lines.csv')
windows_file_url = None
with open('./windows_lines.csv', 'w') as f:
windows_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(windows_file_url)
def test_skip_rows(self):
        # test line skipping
if os.path.exists('./skip_lines.csv'):
os.remove('./skip_lines.csv')
skip_file_url = None
with open('./skip_lines.csv', 'w') as f:
f.write("trash\n");
f.write("junk\n");
skip_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(skip_file_url)
def test_creation_from_csv_on_http(self):
pass
# sf = SFrame(data=self.url, use_header=False)
# self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))
def test_creation_from_csv_on_s3(self):
# Requires s3 account for jenkins
# sf = SFrame(data='s3://graphlab-testdata/foo.csv')
# print sf.head(sf.num_rows())
pass
def test_creation_from_csv_dir_local(self):
csv_dir = "./csv_dir"
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.mkdir(csv_dir)
for i in range(0, 100):
with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:
url = f.name
self.dataframe.to_csv(url, index=False)
f.close()
singleton_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.0.csv"))
self.assertEquals(singleton_sf.num_rows(), 10)
many_sf = SFrame.read_csv(csv_dir)
self.assertEquals(many_sf.num_rows(), 1000)
glob_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.*2.csv"))
self.assertEquals(glob_sf.num_rows(), 100)
with self.assertRaises(RuntimeError):
SFrame.read_csv("missingdirectory")
with self.assertRaises(ValueError):
SFrame.read_csv("")
shutil.rmtree(csv_dir)
def test_creation_from_iterable(self):
# Normal dict of lists
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
df = pd.DataFrame(the_dict)
self.__test_equal(sf, df)
# Test that a missing value does not change the data type
the_dict['ints'][0] = None
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), int)
# numpy.nan is actually a float, so it should cast the column to float
the_dict['ints'][0] = np.nan
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), float)
# Just a single list
sf = SFrame(self.int_data)
df = pd.DataFrame(self.int_data)
df.columns = ['X1']
self.__test_equal(sf, df)
# Normal list of lists
list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]
sf = SFrame(list_of_lists)
cntr = 0
for i in sf:
self.assertEquals(list_of_lists[cntr], list(i['X1']))
cntr += 1
self.assertEquals(sf.num_columns(), 1)
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})
df = pd.DataFrame(the_dict)
self.__test_equal(sf2, df)
sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])
self.assertEquals(['X1','X2','X3'],sf2.column_names())
sf2.rename({'X1':'ints','X2':'floats','X3':'strings'})
sf2=sf2[['floats','ints','strings']]
self.__test_equal(sf2, df)
sf = SFrame({'text': ('foo', 'bar', 'biz')})
df = pd.DataFrame({'text': ['foo', 'bar', 'biz']})
self.__test_equal(sf, df)
def test_head_tail(self):
sf = SFrame(data=self.dataframe)
assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))
# Cannot test for equality the same way because of dataframe indices
taildf = sf.tail(4)
for i in range(0, 4):
self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])
self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])
self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])
def test_head_tail_edge_case(self):
sf = SFrame()
self.assertEquals(sf.head().num_columns(), 0)
self.assertEquals(sf.tail().num_columns(), 0)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
sf = SFrame()
sf['a'] = []
self.assertEquals(sf.head().num_columns(), 1)
self.assertEquals(sf.tail().num_columns(), 1)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
def test_transform(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname], sf.column_types()[i])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
def test_transform_with_recursion(self):
sf = SFrame(data={'a':[0,1,2,3,4], 'b':['0','1','2','3','4']})
        # this should be equivalent to sf.apply(lambda x: x) since column 'a'
        # is equivalent to range(5)
sa = sf.apply(lambda x: sf[x['a']])
sb = sf.apply(lambda x: x)
self.__assert_sarray_equal(sa, sb)
def test_transform_with_type_inference(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'])
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
        # an SFrame apply that returns a list of numerics should yield a vector
        # (array) column, not a list column
        sa = sf.apply(lambda x: [x['int_data'], x['float_data']])
        self.assertEqual(sa.dtype(), array.array)
def test_transform_with_exception(self):
sf = SFrame(data=self.dataframe)
self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key
self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str
self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error
self.assertRaises(IndexError, lambda: sf.apply(lambda x: list(x.values())[10])) # index out of bound error
def test_empty_transform(self):
sf = SFrame()
b = sf.apply(lambda x:x)
self.assertEquals(len(b.head()), 0)
def test_flatmap(self):
# Correctness of typical usage
n = 10
sf = SFrame({'id': range(n)})
new_sf = sf.flat_map(["id_range"], lambda x: [[str(i)] for i in range(x['id'])])
self.assertEqual(new_sf.column_names(), ["id_range"])
self.assertEqual(new_sf.column_types(), [str])
expected_col = [str(x) for i in range(n) for x in range(i)]
self.assertListEqual(list(new_sf['id_range']), expected_col)
# Empty SFrame, without explicit column types
sf = SFrame()
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id_range'],
lambda x: [[i] for i in range(x['id'])])
# Empty rows successfully removed
sf = SFrame({'id': range(15)})
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 8 else [])
self.assertEqual(new_sf.num_rows(), 6)
        # if the first ten rows all produce empty output, type inference fails and raises an error
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 9 else [])
def test_select_column(self):
sf = SFrame(data=self.dataframe)
sub_sf = sf.select_columns(['int_data', 'string_data'])
exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})
self.__test_equal(sub_sf, exp_df)
with self.assertRaises(ValueError):
sf.select_columns(['int_data', 'string_data', 'int_data'])
# test indexing
sub_col = sf['float_data']
self.assertEqual(list(sub_col.head(10)), self.float_data)
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(['duh',1])
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(0)
with self.assertRaises(RuntimeError):
sub_sf = sf.select_columns(['not_a_column'])
self.assertEqual(sf.select_columns([int]).column_names(), ['int_data'])
self.assertEqual(sf.select_columns([int, str]).column_names(), ['int_data', 'string_data'])
self.assertEqual(sf[int].column_names(), ['int_data'])
self.assertEqual(sf[[int, str]].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf[int, str].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf['int_data', 'string_data'].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf['string_data', 'int_data'].column_names(), ['string_data', 'int_data'])
sf = SFrame()
with self.assertRaises(RuntimeError):
sf.select_column('x')
with self.assertRaises(RuntimeError):
sf.select_columns(['x'])
sf.add_column(SArray(), 'x')
# does not throw
sf.select_column('x')
sf.select_columns(['x'])
with self.assertRaises(RuntimeError):
sf.select_column('y')
with self.assertRaises(RuntimeError):
sf.select_columns(['y'])
def test_topk(self):
sf = SFrame(data=self.dataframe)
# Test that order is preserved
df2 = sf.topk('int_data').to_dataframe()
df2_expected = self.dataframe.sort('int_data', ascending=False)
df2_expected.index = range(df2.shape[0])
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('float_data', 3).to_dataframe()
df2_expected = self.dataframe.sort('float_data', ascending=False).head(3)
df2_expected.index = range(3)
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('string_data', 3).to_dataframe()
for i in range(0, 3):
self.assertEqual(df2['int_data'][2-i], i + 7)
with self.assertRaises(TypeError):
sf.topk(2,3)
sf = SFrame()
sf.add_column(SArray([1,2,3,4,5]), 'a')
sf.add_column(SArray([1,2,3,4,5]), 'b')
sf.topk('a', 1) # should not fail
def test_filter(self):
sf = SFrame(data=self.dataframe)
filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])
sf2 = sf[filter_sa]
exp_df = sf.head(3).append(sf.tail(3))
self.__test_equal(sf2, exp_df.to_dataframe())
        # filter by an all-nonzero mask (keeps every row)
sf2 = sf[SArray(self.int_data)]
exp_df = sf.head(10).to_dataframe()
self.__test_equal(sf2, exp_df)
# filter by 0s
sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]
exp_df = sf.head(0).to_dataframe()
self.__test_equal(sf2, exp_df)
# wrong size
with self.assertRaises(IndexError):
sf2 = sf[SArray([0,1,205])]
        # filtering a much larger SFrame
sf = SFrame()
n = 1000000
sf['a'] = range(n)
result = sf[sf['a'] == -1]
self.assertEquals(len(result), 0)
result = sf[sf['a'] > n - 123]
self.assertEquals(len(result), 122)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i + n - 122, l[i])
result = sf[sf['a'] < 2000]
self.assertEquals(len(result), 2000)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i, l[i])
def test_sample_split(self):
sf = SFrame(data=self.__create_test_df(100))
entry_list = set()
for i in sf:
entry_list.add(str(i))
sample_sf = sf.sample(.12, 9)
sample_sf2 = sf.sample(.12, 9)
self.assertEqual(len(sample_sf), len(sample_sf2))
assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())
for i in sample_sf:
self.assertTrue(str(i) in entry_list)
with self.assertRaises(ValueError):
sf.sample(3)
sample_sf = SFrame().sample(.12, 9)
self.assertEqual(len(sample_sf), 0)
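        # random_split partitions the rows into two disjoint SFrames; every
        # row of the second split must come from the original and must not
        # appear in the first split.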
a_split = sf.random_split(.12, 9)
first_split_entries = set()
for i in a_split[0]:
first_split_entries.add(str(i))
for i in a_split[1]:
self.assertTrue(str(i) in entry_list)
self.assertTrue(str(i) not in first_split_entries)
with self.assertRaises(ValueError):
sf.random_split(3)
self.assertEqual(len(SFrame().random_split(.4)[0]), 0)
self.assertEqual(len(SFrame().random_split(.4)[1]), 0)
# tests add_column, rename
def test_edit_column_ops(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
# Make sure auto names work
names = sf.column_names()
cntr = 1
for i in names:
self.assertEquals("X"+str(cntr), i)
cntr = cntr + 1
# Remove a column
del sf['X2']
# names
names = sf.column_names()
self.assertEquals(len(names), 2)
self.assertEquals('X1', names[0])
self.assertEquals('X3', names[1])
# check content
self.assertEquals(list(sf['X1'].head(10)), self.int_data)
self.assertEquals(list(sf['X3'].head(10)), self.string_data)
# check that a new automatically named column will not conflict
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
if len(uniq_set) == 1:
self.assertEquals(list(sf[i].head(10)), self.int_data)
else:
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), 3)
# replacing columns preserves order
names = sf.column_names()
for n in names:
sf[n] = sf[n].apply(lambda x: x)
self.assertEquals(sf.column_names(), names)
# do it again!
del sf['X1']
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), len(names))
# standard rename
rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}
sf.rename(rename_dict)
self.assertEquals(sf.column_names(), ['data','more_data','even_more'])
# rename a column to a name that's already taken
with self.assertRaises(RuntimeError):
sf.rename({'data':'more_data'})
# try to rename a column that doesn't exist
with self.assertRaises(ValueError):
sf.rename({'foo':'bar'})
# pass something other than a dict
with self.assertRaises(TypeError):
sf.rename('foo')
# Setting a column to const preserves order
names = sf.column_names()
for n in names:
sf[n] = 1
self.assertEquals(sf.column_names(), names)
def test_duplicate_add_column_failure(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data), "hello")
with self.assertRaises(RuntimeError):
sf.add_column(SArray(self.float_data), "hello")
def test_remove_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
sf2 = sf.remove_column('X3')
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])
sf2 = sf.remove_columns(['X2', 'X5'])
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X4'])
# with a generator expression
sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()))
assert sf is sf2
self.assertEquals(sf.column_names(), ['X4'])
def test_remove_bad_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_column('bad'))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4']))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
def __generate_synthetic_sframe__(self, num_users):
"""
        Generate synthetic collaborative-filtering data for `num_users` users.
        User i watched movies 1, ..., i, with
        rating(i, j) = i + j
        length(i, j) = i - j
"""
sf = SFrame()
sparse_matrix = {}
for i in range(1, num_users + 1):
sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]
user_ids = []
movie_ids = []
ratings = []
length_of_watching = []
for u in sparse_matrix:
user_ids += [u] * len(sparse_matrix[u])
movie_ids += [x[0] for x in sparse_matrix[u]]
ratings += [x[1] for x in sparse_matrix[u]]
length_of_watching += [x[2] for x in sparse_matrix[u]]
# typical add column stuff
sf['user_id'] = (SArray(user_ids, int))
sf['movie_id'] = (SArray(movie_ids, str))
sf['rating'] = (SArray(ratings, float))
sf['length'] = (SArray(length_of_watching, int))
return sf
def test_aggregate_ops(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf.__materialize__()
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.SUM('vector_values'),
aggregate.MEAN('vector_values'),
aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value'),
aggregate.FREQ_COUNT('value')]
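            # With a list of aggregators, groupby auto-names the output
            # columns, e.g. 'Count', 'Sum of value', 'Vector Sum of vector_values'.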
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['Count'][0], m)
self.assertEqual(sf2['Sum of value'][0], sum(values))
self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))
self.assertEqual(sf2['Min of value'][0], min(values))
self.assertEqual(sf2['Max of value'][0], max(values))
self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))
self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))
np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),
list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),
list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['Count Distinct of value'][0],
len(np.unique(values)))
self.assertEqual(sorted(sf2['Distinct of value'][0]),
sorted(list(np.unique(values))))
self.assertEqual(sf2['Frequency Count of value'][0],
{k:1 for k in np.unique(values)})
def test_min_max_with_missing_values(self):
"""
        Test builtin groupby aggregators when values are missing
"""
sf = SFrame()
sf['key'] = [1,1,1,1,1,1,2,2,2,2]
sf['value'] = [1,None,None,None,None,None, None,None,None,None]
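        # Group 1 has a single non-missing value, so its MIN/MAX/AVG are 1;
        # group 2 is entirely missing, so those aggregates come back as None.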
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value'), aggregate.FREQ_COUNT('value')]
sf2 = sf.groupby('key', built_ins).sort('key')
self.assertEqual(list(sf2['Count']), [6,4])
self.assertEqual(list(sf2['Sum of value']), [1, 0])
self.assertEqual(list(sf2['Avg of value']), [1, None])
self.assertEqual(list(sf2['Min of value']), [1, None])
self.assertEqual(list(sf2['Max of value']), [1, None])
self.assertEqual(list(sf2['Var of value']), [0, 0])
self.assertEqual(list(sf2['Stdv of value']), [0, 0])
self.assertEqual(list(sf2['Count Distinct of value']), [2, 1])
self.assertEqual(set(sf2['Distinct of value'][0]), set([1, None]))
self.assertEqual(set(sf2['Distinct of value'][1]), set([None]))
self.assertEqual(sf2['Frequency Count of value'][0], {1:1, None:5})
self.assertEqual(sf2['Frequency Count of value'][1], {None:4})
def test_aggregate_ops_on_lazy_frame(self):
"""
        Test builtin groupby aggregators on a lazily evaluated SFrame
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf['value'] = sf['value'] + 0
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.SUM('vector_values'),
aggregate.MEAN('vector_values'),
aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value')]
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['Count'][0], m)
self.assertEqual(sf2['Sum of value'][0], sum(values))
self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))
self.assertEqual(sf2['Min of value'][0], min(values))
self.assertEqual(sf2['Max of value'][0], max(values))
self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))
self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))
np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),
list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),
list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['Count Distinct of value'][0],
len(np.unique(values)))
self.assertEqual(sorted(sf2['Distinct of value'][0]),
sorted(np.unique(values)))
def test_aggregate_ops2(self):
"""
Test builtin groupby aggregators using explicit named columns
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
built_ins = {'count':aggregate.COUNT,
'sum':aggregate.SUM('value'),
'avg':aggregate.AVG('value'),
'avg2':aggregate.MEAN('value'),
'min':aggregate.MIN('value'),
'max':aggregate.MAX('value'),
'var':aggregate.VAR('value'),
'var2':aggregate.VARIANCE('value'),
'stdv':aggregate.STD('value'),
'stdv2':aggregate.STDV('value'),
'vector_sum': aggregate.SUM('vector_values'),
'vector_mean': aggregate.MEAN('vector_values'),
'count_unique':aggregate.COUNT_DISTINCT('value'),
'unique':aggregate.DISTINCT('value'),
'frequency':aggregate.FREQ_COUNT('value')}
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['count'][0], m)
self.assertEqual(sf2['sum'][0], sum(values))
self.assertAlmostEqual(sf2['avg'][0], np.mean(values))
self.assertAlmostEqual(sf2['avg2'][0], np.mean(values))
self.assertEqual(sf2['min'][0], min(values))
self.assertEqual(sf2['max'][0], max(values))
self.assertAlmostEqual(sf2['var'][0], np.var(values))
self.assertAlmostEqual(sf2['var2'][0], np.var(values))
self.assertAlmostEqual(sf2['stdv'][0], np.std(values))
self.assertAlmostEqual(sf2['stdv2'][0], np.std(values))
np.testing.assert_almost_equal(sf2['vector_sum'][0], list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(sf2['vector_mean'][0], list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['count_unique'][0], len(np.unique(values)))
self.assertEqual(sorted(sf2['unique'][0]),
sorted(np.unique(values)))
self.assertEqual(sf2['frequency'][0],
{k:1 for k in np.unique(values)})
def test_groupby(self):
"""
Test builtin groupby and aggregate on different column types
"""
num_users = 500
sf = self.__generate_synthetic_sframe__(num_users=num_users)
built_ins = [aggregate.COUNT(), aggregate.SUM('rating'),
aggregate.AVG('rating'), aggregate.MIN('rating'),
aggregate.MAX('rating'), aggregate.VAR('rating'),
aggregate.STDV('rating')]
built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']
"""
Test groupby user_id and aggregate on rating
"""
sf_user_rating = sf.groupby('user_id', built_ins)
actual = sf_user_rating.column_names()
expected = ['%s of rating' % v for v in built_in_names] \
+ ['user_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_user_rating:
uid = row['user_id']
mids = range(1, uid + 1)
ratings = [uid + i for i in mids]
expected = [len(ratings), sum(ratings), np.mean(ratings),
min(ratings), max(ratings), np.var(ratings),
np.sqrt(np.var(ratings))]
actual = [row['Count']] + [row['%s of rating' % op] \
for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
"""
Test that count can be applied on empty aggregate column.
"""
sf_user_rating = sf.groupby("user_id", {'counter': aggregate.COUNT()})
actual = {x['user_id']: x['counter'] for x in sf_user_rating}
expected = {i: i for i in range(1, num_users + 1)}
self.assertDictEqual(actual, expected)
"""
Test groupby movie_id and aggregate on length_of_watching
"""
built_ins = [aggregate.COUNT(), aggregate.SUM('length'),
aggregate.AVG('length'), aggregate.MIN('length'),
aggregate.MAX('length'), aggregate.VAR('length'),
aggregate.STDV('length')]
sf_movie_length = sf.groupby('movie_id', built_ins)
actual = sf_movie_length.column_names()
expected = ['%s of length' % v for v in built_in_names] \
+ ['movie_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_movie_length:
mid = row['movie_id']
uids = range(int(mid), num_users + 1)
values = [i - int(mid) for i in uids]
expected = [len(values), sum(values), np.mean(values), min(values),
max(values), np.var(values), np.std(values)]
actual = [row['Count']] + [row['%s of length' % op] \
for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
def test_quantile_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
# max and min rating for each user
g = sf.groupby('user_id', [aggregate.MIN('rating'),
aggregate.MAX('rating'),
aggregate.QUANTILE('rating', 0, 1)])
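        # QUANTILE('rating', 0, 1) requests the 0.0 and 1.0 quantiles, which
        # must match the MIN and MAX aggregates row by row.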
self.assertEquals(len(g), 500)
for row in g:
minrating = row['Min of rating']
maxrating = row['Max of rating']
arr = list(row['Quantiles of rating'])
self.assertEquals(len(arr), 2)
self.assertEquals(arr[0], minrating)
self.assertEquals(arr[1], maxrating)
def test_argmax_argmin_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_ret = sf.groupby('user_id',
{'movie with max rating' : aggregate.ARGMAX('rating','movie_id'),
'movie with min rating' : aggregate.ARGMIN('rating','movie_id')})
self.assertEquals(len(sf_ret), 500)
self.assertEqual(sf_ret["movie with max rating"].dtype(), str)
self.assertEqual(sf_ret["movie with min rating"].dtype(), str)
self.assertEqual(sf_ret["user_id"].dtype(), int)
# make sure we have computed correctly.
max_d = {}
min_d = {}
for i in sf:
key = i['user_id']
if key not in max_d:
max_d[key] = (i['movie_id'],i['rating'])
min_d[key] = (i['movie_id'],i['rating'])
else:
if max_d[key][1] < i['rating']:
max_d[key] = (i['movie_id'],i['rating'])
if min_d[key][1] > i['rating']:
min_d[key] = (i['movie_id'],i['rating'])
for i in sf_ret:
key = i['user_id']
self.assertEqual(i["movie with max rating"],max_d[key][0])
self.assertEqual(i["movie with min rating"],min_d[key][0])
def test_multicolumn_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_um = sf.groupby(["user_id", "movie_id"], aggregate.COUNT)
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
sf_um = sf.groupby(["movie_id", "user_id"], aggregate.COUNT())
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
def __assert_concat_result_equal(self, result, expected, list_columns):
self.assertEqual(result.num_columns(), expected.num_columns())
for column in result.column_names():
c1 = result[column]
c2 = expected[column]
self.assertEqual(c1.dtype(), c2.dtype())
self.assertEqual(c1.size(), c2.size())
if (column in list_columns):
for i in range(len(c1)):
if (c1[i] == None):
self.assertTrue(c2[i] == None)
continue
if (c1.dtype() == dict):
for k in c1[i]:
self.assertEqual(c2[i][k], c1[i][k])
else:
s1 = list(c1[i]);
if s1 != None: s1.sort()
s2 = list(c2[i]);
if s2 != None: s2.sort()
self.assertEqual(s1, s2)
else:
self.assertEqual(list(c1),list(c2))
def test_groupby_dict_key(self):
t = SFrame({'a':[{1:2},{3:4}]})
with self.assertRaises(TypeError):
t.groupby('a', {})
def test_concat(self):
sf = SFrame()
sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]
sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]
sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']
sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]
sf['e'] = [{'x': 1}] * len(sf['a'])
print(sf['b'].dtype())
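        # CONCAT('b') gathers the non-None values of b for each group; key 5
        # has only missing values, so its list is empty.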
result = sf.groupby('a', aggregate.CONCAT('b'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of b': [[1.,1.,2.,2.],[1.,3.,3.],[4.],[2.], []]
})
expected_result['List of b'] = expected_result['List of b'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])
result = sf.groupby('a', aggregate.CONCAT('d'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])
result = sf.groupby('a', {'c_c' :aggregate.CONCAT('c')})
expected_result = SFrame({
'a': [1,2,3,4, 5],
'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])
result = sf.groupby('a', aggregate.CONCAT('b','c'))
expected_result = SFrame({
'a': [1,2,3,4,5],
'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])
result = sf.groupby('a', {'c_b':aggregate.CONCAT('c','b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])
result = sf.groupby('a', {'cs':aggregate.CONCAT('c'), 'bs':aggregate.CONCAT('b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'bs': [[1,1,2,2],[1,3,3],[4],[2], []],
'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
expected_result['bs'] = expected_result['bs'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])
        # failure cases: no column given, a nonexistent column, or unsupported column types
with self.assertRaises(TypeError):
sf.groupby('a', aggregate.CONCAT())
with self.assertRaises(KeyError):
sf.groupby('a', aggregate.CONCAT('nonexist'))
with self.assertRaises(TypeError):
sf.groupby('a', aggregate.CONCAT('e', 'a'))
def test_select_one(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
res = list(sf.groupby('a', {'b':aggregate.SELECT_ONE('b')}))
self.assertEqual(len(res), 5)
for i in res:
self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)
def test_unique(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
self.assertEqual(len(sf.unique()), 10)
vals = [1,1,2,2,3,3,4,4, None, None]
sf = SFrame({'a':vals,'b':vals})
res = sf.unique()
self.assertEqual(len(res), 5)
self.assertEqual(set(res['a']), set([1,2,3,4,None]))
self.assertEqual(set(res['b']), set([1,2,3,4,None]))
def test_append_empty(self):
sf_with_data = SFrame(data=self.dataframe)
empty_sf = SFrame()
self.assertFalse(sf_with_data.append(empty_sf) is sf_with_data)
self.assertFalse(empty_sf.append(sf_with_data) is sf_with_data)
self.assertFalse(empty_sf.append(empty_sf) is empty_sf)
def test_append_all_match(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_lazy(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
self.assertTrue(new_sf.__is_materialized__())
filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])
filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])
new_sf1 = new_sf[filter_sf1]
new_sf2 = new_sf[filter_sf2]
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())
assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())
row = sf1.head(1)
sf = SFrame()
for i in range(10):
sf = sf.append(row)
df = sf.to_dataframe()
for i in range(10):
self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))
def test_recursive_append(self):
sf = SFrame()
for i in range(200):
sf = sf.append(SFrame(data = self.dataframe))
#consume
sf.__materialize__()
def test_print_sframe(self):
sf = SFrame()
def _test_print():
sf.__repr__()
sf._repr_html_()
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
output = StringIO()
sf.print_rows(output_file=output)
n = 20
sf['int'] = [i for i in range(n)]
sf['float'] = [float(i) for i in range(n)]
sf['str'] = [str(i) for i in range(n)]
uc = '\xe5\xa4\xa7\xe5\xa4\xb4' # dato pronounced in chinese, big head
sf['unicode'] = [uc for i in range(n)]
sf['array'] = [array.array('d', [i]) for i in range(n)]
sf['list'] = [[i, float(i), [i]] for i in range(n)]
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
sf['dt'] = [utc for i in range(n)]
sf['img'] = [Image() for i in range(n)]
sf['long_str'] = ["".join([str(i)] * 50) for i in range(n)]
sf['long_unicode'] = ["".join([uc] * 50) for i in range(n)]
sf['bad_unicode'] = ['\x9d' + uc for i in range(n)]
_test_print()
def test_print_lazy_sframe(self):
sf1 = SFrame(data=self.dataframe)
self.assertTrue(sf1.__is_materialized__())
sf2 = sf1[sf1['int_data'] > 3]
sf2.__repr__()
sf2.__str__()
self.assertFalse(sf2.__is_materialized__())
len(sf2)
self.assertTrue(sf2.__is_materialized__())
def test_append_order_diff(self):
        # names match but column order does not
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
sf2.swap_columns('int_data', 'string_data')
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_empty_sframe(self):
sf = SFrame(data=self.dataframe)
other = SFrame()
# non empty append empty
assert_frame_equal(sf.append(other).to_dataframe(), self.dataframe)
# empty append non empty
assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)
#empty append empty
assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())
def test_append_exception(self):
sf = SFrame(data=self.dataframe)
        # column count does not match
other = SFrame()
other.add_column(SArray(), "test")
self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same
        # column names do not match
other = SFrame()
names = sf.column_names()
for name in sf.column_names():
other.add_column(SArray(), name)
names[0] = 'some name not match'
self.assertRaises(RuntimeError, lambda: sf.append(other))
        # names match but column types do not
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
#change one column type
sf1["int_data"] = sf2.select_column("int_data").astype(float)
        self.assertRaises(RuntimeError, lambda: sf1.append(sf2))
def test_simple_joins(self):
inner_expected = SFrame()
inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name')
inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id')
inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name')
# Tests the "natural join" case
beg = time.time()
res = self.employees_sf.join(self.departments_sf)
end = time.time()
print("Really small join: " + str(end-beg) + " s")
self.__assert_join_results_equal(res, inner_expected)
left_join_row = SFrame()
left_join_row.add_column(SArray(['John']), 'last_name')
left_join_row.add_column(SArray([None], int), 'dep_id')
left_join_row.add_column(SArray([None], str), 'dep_name')
left_expected = inner_expected.append(left_join_row)
# Left outer join, passing string to 'on'
res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')
self.__assert_join_results_equal(res, left_expected)
right_join_row = SFrame()
right_join_row.add_column(SArray([None], str), 'last_name')
right_join_row.add_column(SArray([35]), 'dep_id')
right_join_row.add_column(SArray(['Marketing']), 'dep_name')
right_expected = inner_expected.append(right_join_row)
# Right outer join, passing list to 'on'
res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])
self.__assert_join_results_equal(res, right_expected)
outer_expected = left_expected.append(right_join_row)
# Full outer join, passing dict to 'on'
res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})
self.__assert_join_results_equal(res, outer_expected)
# Test a join on non-matching key
res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})
self.assertEquals(res.num_rows(), 0)
self.assertEquals(res.num_cols(), 3)
self.assertEquals(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])
# Test a join on a non-unique key
bad_departments = SFrame()
bad_departments['dep_id'] = SArray([33,33,31,31])
bad_departments['dep_name'] = self.departments_sf['dep_name']
no_pk_expected = SFrame()
no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])
no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])
no_pk_expected['dep_name'] = SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])
res = self.employees_sf.join(bad_departments, on='dep_id')
self.__assert_join_results_equal(res, no_pk_expected)
# Left join on non-unique key
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]
res = bad_departments.join(self.employees_sf, on='dep_id', how='left')
self.__assert_join_results_equal(res, no_pk_expected)
def test_big_composite_join(self):
# Create a semi large SFrame with composite primary key (letter, number)
letter_keys = []
number_keys = []
data = []
for i in string.ascii_lowercase:
for j in range(0,100):
letter_keys.append(i)
number_keys.append(j)
which = j % 3
if which == 0:
data.append(string.ascii_uppercase)
elif which == 1:
data.append(string.digits)
elif which == 2:
data.append(string.hexdigits)
pk_gibberish = SFrame()
pk_gibberish['letter'] = SArray(letter_keys, str)
pk_gibberish['number'] = SArray(number_keys, int)
pk_gibberish['data'] = SArray(data, str)
# Some rows that won't match
more_data = []
more_letter_keys = []
more_number_keys = []
for i in range(0,40000):
more_data.append('fish')
more_letter_keys.append('A')
more_number_keys.append(200)
for i in range(0,80):
for j in range(100,1000):
more_data.append('waffles')
more_letter_keys.append(letter_keys[j])
more_number_keys.append(number_keys[j])
# Non-matching row in this stretch
if j == 147:
more_letter_keys[-1] = 'A'
for i in range(0,5000):
more_data.append('pizza')
more_letter_keys.append('Z')
more_number_keys.append(400)
join_with_gibberish = SFrame()
join_with_gibberish['data'] = SArray(more_data, str)
join_with_gibberish['moredata'] = SArray(more_data, str)
join_with_gibberish['a_number'] = SArray(more_number_keys, int)
join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)
expected_answer = SFrame()
exp_letter = []
exp_number = []
exp_data = []
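        # Index 147 of each block had its letter key overwritten with 'A'
        # above and therefore does not match, hence the 100:147 and 148:1000
        # slices in the expected answer.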
for i in range(0,80):
exp_letter.extend(letter_keys[100:147])
exp_number.extend(number_keys[100:147])
exp_letter.extend(letter_keys[148:1000])
exp_number.extend(number_keys[148:1000])
exp_data.extend(data[100:147])
exp_data.extend(data[148:1000])
expected_answer['letter'] = SArray(exp_letter, str)
expected_answer['number'] = SArray(exp_number, int)
expected_answer['data'] = SArray(exp_data, str)
expected_answer['data.1'] = 'waffles'
expected_answer['moredata'] = 'waffles'
beg = time.time()
res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})
end = time.time()
print("Join took " + str(end-beg) + " seconds")
self.__assert_join_results_equal(res, expected_answer)
def test_convert_dataframe_empty(self):
sf = SFrame()
sf['a'] = SArray([], int)
df = sf.to_dataframe()
self.assertEqual(df['a'].dtype, int)
sf1 = SFrame(df)
self.assertEquals(sf1['a'].dtype(), int)
self.assertEqual(sf1.num_rows(), 0)
def test_replace_one_column(self):
sf = SFrame()
sf['a'] = [1,2,3]
self.assertEquals(list(sf['a']), [1,2,3])
        # this should succeed since we are replacing the only column
sf['a'] = [1,2]
self.assertEquals(list(sf['a']), [1,2])
        # a failed replacement should leave the original column untouched
with self.assertRaises(TypeError):
sf['a'] = [1,2,'a']
self.assertEquals(list(sf['a']), [1,2])
        # replacing a column with one of a different length should fail when the SFrame has more than one column
sf = SFrame()
sf['a'] = [1,2,3]
sf['b'] = ['a', 'b', 'c']
with self.assertRaises(RuntimeError):
sf['a'] = [1,2]
def test_filter_by(self):
# Set up SFrame to filter by
sf = SFrame()
sf.add_column(SArray(self.int_data), "ints")
sf.add_column(SArray(self.float_data), "floats")
sf.add_column(SArray(self.string_data), "strings")
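        # filter_by keeps the rows whose column value appears in the given
        # values; with exclude=True it keeps the complement.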
# Normal cases
res = sf.filter_by(SArray(self.int_data), "ints")
self.__assert_join_results_equal(res, sf)
res = sf.filter_by(SArray(self.int_data), "ints", exclude=True)
self.assertEquals(list(res), [])
res = sf.filter_by([5,6], "ints")
exp = SFrame()
exp.add_column(SArray(self.int_data[4:6]), "ints")
exp.add_column(SArray(self.float_data[4:6]), "floats")
exp.add_column(SArray(self.string_data[4:6]), "strings")
self.__assert_join_results_equal(res, exp)
exp_opposite = SFrame()
exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), "ints")
exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), "floats")
exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), "strings")
res = sf.filter_by([5,6], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
exp_one = SFrame()
exp_one.add_column(SArray(self.int_data[4:5]), "ints")
exp_one.add_column(SArray(self.float_data[4:5]), "floats")
exp_one.add_column(SArray(self.string_data[4:5]), "strings")
exp_all_but_one = SFrame()
exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), "ints")
exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), "floats")
exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), "strings")
res = sf.filter_by(5, "ints")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
res = sf.filter_by("5", "strings")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
        # Values that do not appear in the column
res = sf.filter_by([77,77,88,88], "ints")
# Test against empty SFrame with correct columns/types
self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])
res = sf.filter_by([77,77,88,88], "ints", exclude=True)
self.__assert_join_results_equal(res, sf)
# Duplicate values
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Duplicate and missing
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Type mismatch
with self.assertRaises(TypeError):
res = sf.filter_by(["hi"], "ints")
# Column doesn't exist
with self.assertRaises(KeyError):
res = sf.filter_by([1,2], "intssss")
# Something that can't be turned into an SArray
with self.assertRaises(Exception):
res = sf.filter_by({1:2,3:4}, "ints")
# column_name not given as string
with self.assertRaises(TypeError):
res = sf.filter_by(1,2)
# Duplicate column names after join. Should be last because of the
# renames.
sf.rename({'ints':'id','floats':'id1','strings':'id11'})
exp.rename({'ints':'id','floats':'id1','strings':'id11'})
exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'})
res = sf.filter_by([5,6], "id")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([5,6], "id", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
    # TODO: should be an inner function
def __test_to_from_dataframe(self, data, type):
sf = SFrame()
sf['a'] = data
df = sf.to_dataframe()
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
df = pd.DataFrame({'val': data})
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
def test_to_from_dataframe(self):
self.__test_to_from_dataframe([1,2,3], int)
self.__test_to_from_dataframe(['a', 'b', 'c'], str)
self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)
self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)
self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)
self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)
def test_pack_columns_exception(self):
sf = SFrame()
sf['a'] = [1, 2, 3, None, None]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
        # cannot pack non-numeric values into an array
with self.assertRaises(TypeError):
sf.pack_columns(dtype=array.array)
        # cannot give a non-numeric fill_na value when packing to array
with self.assertRaises(ValueError):
sf.pack_columns(dtype=array.array, fill_na='c')
        # cannot pack columns that do not exist
with self.assertRaises(ValueError):
sf.pack_columns(['d','a'])
# dtype has to be dict/array/list
with self.assertRaises(ValueError):
sf.pack_columns(dtype=str)
# pack duplicate columns
with self.assertRaises(ValueError):
sf.pack_columns(['a','a'])
        # packing a subset of columns to array should fail for columns that are not numeric
with self.assertRaises(TypeError):
sf.pack_columns(['a','b'], dtype=array.array)
with self.assertRaises(TypeError):
sf.pack_columns(column_prefix = 1)
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = '1')
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = 'c', columns=['a', 'b'])
def test_pack_columns2(self):
sf = SFrame()
sf['id'] = [1, 2, 3, 4]
sf['category.a'] = [None, '2', '3', None]
sf['category.b'] = [None, 2.0, None, 4.0]
expected = SArray([
[None, None],
['2', 2.0],
['3', None],
[None, 4.0]])
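        # column_prefix='category' packs only the columns whose names start
        # with 'category.', leaving 'id' untouched.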
result = sf.pack_columns(column_prefix='category')
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['category'], expected)
result = sf.pack_columns(column_prefix='category', new_column_name="new name")
self.assertEqual(result.column_names(), ['id', 'new name'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['new name'], expected)
# default dtype is list
result = sf.pack_columns(column_prefix='category', dtype=list)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == True by default
expected = SArray([
{},
{'a':'2', 'b':2.0},
{'a':'3'},
{'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict)
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == False
expected = SArray([
{},
{'category.a':'2', 'category.b':2.0},
{'category.a':'3'},
{'category.b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, remove_prefix=False)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# fill_na
expected = SArray([
{'a':1, 'b':1},
{'a':'2', 'b':2.0},
{'a':'3', 'b':1},
{'a':1, 'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, fill_na = 1)
self.__assert_sarray_equal(result['category'], expected)
expected = SArray([
[1],
[2],
[3],
[4]], list)
result = sf.pack_columns(['id'], new_column_name='id')
self.assertEqual(sorted(result.column_names()), sorted(['id', 'category.a', 'category.b']))
self.__assert_sarray_equal(result['id'], expected)
def test_pack_columns(self):
sf = SFrame()
sf['id'] = [1, 2, 3, 4, 5]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
expected_all_default = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
# pack all columns, all default values
self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)
expected_ab_default = SArray([
[1, None],
[2, '2'],
[3, '3'],
[4, None],
[5, '5']
])
expected_all_fillna_1 = SArray([
[1, -1, -1],
[2, '2', 2.0],
[3, '3', 3.0],
[4, -1, -1],
[5, '5', 5.0]
])
        # pack all columns, filling missing values with -1
result = sf.pack_columns(fill_na=-1)
self.assertEqual(result.column_names(), ['X1'])
self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)
        # pack a subset of columns with default options
result = sf.pack_columns(['id','b'])
self.assertEqual(result.column_names(), ['c','X2'])
self.__assert_sarray_equal(result['c'], sf['c'])
self.__assert_sarray_equal(result['X2'], expected_ab_default)
expected_sarray_ac_fillna_default = SArray([
[1, float('NaN')],
[2, 2.0],
[3, 3.0],
[4, float('NaN')],
[5, 5.0]
])
result = sf.pack_columns(['id','c'], dtype=array.array)
self.assertEqual(result.column_names(), ['b', 'X2'])
self.__assert_sarray_equal(result['b'], sf['b'])
self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)
expected_dict_default = SArray([
{'id': 1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id':4 },
{'id':5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict)
self.__assert_sarray_equal(result['X1'], expected_dict_default)
expected_dict_fillna = SArray([
{'id': 1, 'b':-1, 'c': -1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id': 4, 'b':-1, 'c': -1},
{'id': 5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict, fill_na=-1)
self.__assert_sarray_equal(result['X1'], expected_dict_fillna)
# pack large number of rows
sf = SFrame()
num_rows = 100000
sf['a'] = range(0, num_rows);
sf['b'] = range(0, num_rows);
result = sf.pack_columns(['a', 'b']);
self.assertEqual(len(result), num_rows);
def test_pack_columns_dtype(self):
a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})
b = a.pack_columns(['name','data'],dtype=array.array)
expected = SArray([[-140500967, 3],[-1405039672,4]])
self.__assert_sarray_equal(b['X1'], expected)
def test_unpack_dict_mixtype(self):
sf = SFrame({'a':[{'a':["haha", "hoho"]}, {'a':array.array('d', [1,2,3])}]})
sf = sf.unpack('a', column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), list)
sf = SFrame({'a':[{'a':["haha", "hoho"]}, {'a':None}]})
sf = sf.unpack('a', column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), list)
sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':None}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), array.array)
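        # When the unpacked values have incompatible types (array vs dict),
        # the resulting column falls back to str.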
sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':{'b':1}}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), str)
sa = SArray([{'a': 1, 'b': 0.1}, {'a': 0.1, 'b': 1}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), float)
self.assertEqual(sf['b'].dtype(), float)
def test_unpack_list(self):
sa = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
expected = SFrame()
expected ['a'] = [1, 2, 3, 4, 5]
expected ['b'] = [None, '2', '3', None, '5']
expected ['c'] = [None, 2.0, 3.0, None, 5.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(column_name_prefix='ttt');
self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
        # fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = [1, 2, None, 4, 5]
e['b'] = [None, '2', '3', None, '5']
e['c'] = [None, 2.0, None, None, 5.0]
result = sa.unpack(na_value=3);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
        # column_name_prefix must be a string, not a list
with self.assertRaises(TypeError):
sa.unpack(column_name_prefix=['a','b'])
        # column types that conflict with the underlying values
with self.assertRaises(RuntimeError):
sa.unpack(column_types = [str, int, float])
# wrong limit types
with self.assertRaises(TypeError):
sa.unpack(limit=["1"])
# int array cannot be unpacked
with self.assertRaises(TypeError):
SArray([1,2,3,4]).unpack()
        # column_name_prefix must be a string
with self.assertRaises(TypeError):
sa.unpack(1)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = int)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = [np.array])
# cannot infer type if no values
with self.assertRaises(RuntimeError):
SArray([], list).unpack()
def test_unpack_array(self):
import array
sa = SArray([
array.array('d', [1, 1, 0]),
array.array('d', [2, -1, 1]),
array.array('d', [3, 3, 2]),
array.array('d', [-1, 2, 3]),
array.array('d', [5, 5, 4])
])
expected = SFrame()
expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]
expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]
expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
        # unpack with a custom column_name_prefix
result = sa.unpack(column_name_prefix = 'unpacked');
result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])))
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
expected['a'] = expected['a'].astype(int)
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype(float)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
        # fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = SArray([1, 2, 3, None, 5], float)
e['b'] = SArray([1, None, 3, 2, 5], float)
e['c'] = SArray([0, 1, 2, 3, 4], float)
result = sa.unpack(na_value=-1);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
def test_unpack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5,6,7]
sf["is_restaurant"] = [1, 1,0,0, 1, None, None]
sf["is_retail"] = [None,1,1,None,1, None, None]
sf["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
packed_sf = SFrame()
packed_sf['user_id'] = sf['user_id']
packed_sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
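        # Unpacking the dict column should reproduce the original flat
        # columns, with missing keys becoming None values.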
with self.assertRaises(TypeError):
packed_sf['user_id'].unpack()
with self.assertRaises(TypeError):
packed_sf['category'].unpack(1)
with self.assertRaises(TypeError):
packed_sf['category'].unpack(value_types = [int])
# unpack only one column
expected_sf = SFrame()
expected_sf["is_retail"] = sf["is_retail"]
unpacked_sf = packed_sf['category'].unpack(limit=["is_retail"], column_types=[int], column_name_prefix=None)
assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())
# unpack all
unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=["is_restaurant", "is_retail", "is_electronics"])
assert_frame_equal(unpacked_sf.to_dataframe(), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe())
        # auto-infer types; the column order may differ, so sort columns before comparison
unpacked_sf = packed_sf["category"].unpack()
unpacked_sf.rename({
"X.is_restaurant": "is_restaurant",
"X.is_retail": "is_retail",
"X.is_electronics": "is_electronics"
})
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe().sort(axis=1))
unpacked_sf = packed_sf["category"].unpack(na_value = 0, column_name_prefix="new")
expected = SFrame()
expected["new.is_restaurant"] = [1, 1,None,None, 1, None, None]
expected["new.is_retail"] = [None,1,1,None,1, None, None]
expected["new.is_electronics"] = ["yes", "no","yes",None,"no", None, None]
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), expected.to_dataframe().sort(axis=1))
        # unpack a dictionary with integer keys
sa = SArray([
{1: 'a'},
{2: 'b'}
])
result = sa.unpack()
expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2])
expected = SFrame({'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2], column_name_prefix="expanded")
expected = SFrame({'expanded.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sa = SArray([{i:i} for i in range(500)])
unpacked_sa = sa.unpack()
self.assertEqual(len(unpacked_sa), len(sa))
i = 0
for v in unpacked_sa:
for j in range(500):
val = v['X.' + str(j)]
if (j == i):
self.assertEqual(val, i);
else:
self.assertEqual(val, None);
i = i + 1
# if types don't agree, convert to string automatically
sa = SArray([{'a':1},{'a': 'a_3'}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [str])
sa = SArray([{'a':None}, {'a': 1}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
sa = SArray([{'a':1}, {'a': None}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
        # type inference happens server side even when a limit is given
sa = SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])
unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix="")
for i in range(10):
v = unpacked[i]
for j in range(10):
if (j != i):
self.assertEqual(v['c'+str(j)], None)
elif j % 2 == 0:
self.assertEqual(v['c'+str(j)], j)
else:
self.assertEqual(v['c'+str(j)], 'v' + str(j))
def test_unpack_sframe(self):
sf = SFrame()
sf['user_id'] = range(7)
sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
sf['list'] = [
None,
range(1),
range(2),
range(3),
range(1),
range(2),
range(3),
]
with self.assertRaises(TypeError):
sf.unpack('user_id')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list'] = sf['list']
expected["is_restaurant"] = [1, 1,0,0, 1, None, None]
expected["is_retail"] = [None,1,1,None,1, None, None]
expected["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
result = sf.unpack('category')
result.rename({
'category.is_restaurant': 'is_restaurant',
'category.is_retail': 'is_retail',
'category.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="")
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="abc")
result.rename({
'abc.is_restaurant': 'is_restaurant',
'abc.is_retail': 'is_retail',
'abc.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", column_types=[str], limit=['is_restaurant'])
new_expected = expected[['user_id', 'list', 'is_restaurant']]
new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)
assert_frame_equal(new_expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", na_value = None)
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,2, None, None,2]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list', na_value= 2)
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,None, None, None,None]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
        # automatically resolve conflicting column names
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [range(5) for i in range(100)]
sf['b.0'] = range(100)
sf['b.0.1'] = range(100)
result = sf.unpack('b')
self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]
sf['b.str1'] = range(100)
result = sf.unpack('b')
self.assertEqual(len(result.column_names()), 4)
def test_stack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["category"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
{},
None]
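        # Stacking the dict column emits one (key, value) row per dict entry;
        # with drop_na=False, empty dicts and None rows still contribute a
        # single row of Nones.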
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3,4,5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(ValueError):
sf.stack('category', ['user_id', 'value'])
# normal case
stacked_sf = sf.stack('category', ['category', 'value'])
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
# set column types
stacked_sf = sf.stack('category')
self.assertTrue(stacked_sf.column_types()[2] == str)
self.assertTrue(stacked_sf.column_types()[3] == int)
# auto generate column names
stacked_sf = sf.stack('category')
new_column_names = stacked_sf.column_names()
self.assertTrue(len(new_column_names) == 4)
expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]})
df_expected = expected_sf.to_dataframe().sort(['user_id', new_column_names[2]]).reset_index(drop=True)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", new_column_names[2]]).reset_index(drop=True), df_expected)
#dropna
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3, 4, 5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
stacked_sf = sf.stack('category', ['category','value'], drop_na = False)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
sf = SFrame()
sf['a'] = SArray(([{}] * 100) + [{'a':1}])
    # it's a dict column, so stack needs two new column types (key and value)
with self.assertRaises(ValueError):
sf.stack('a',['key', 'value'], new_column_type=[str])
with self.assertRaises(ValueError):
sf.stack('a',['key', 'value'], new_column_type=str)
sf.stack('a',['key', 'value'], new_column_type=[str, int])
expected_sf = SFrame()
expected_sf['key'] = SArray([None] * 100 + ["a"])
expected_sf['value'] = SArray([None] * 100 + [1])
def test_stack_list(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [['a', 'b'], ['c'], ['d'],['e', None], None]
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4,5]
expected_result['X1'] = ['a','b','c','d','e',None, None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4]
expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
sf = SFrame()
n = 1000000
sf['a'] = range(1,n)
sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]
result = sf.stack('b')
    self.assertEqual(len(result), (n - 1) * 2)
sf = SFrame()
sf['a'] = SArray(([[]] * 100) + [['a','b']])
    # it's a list column, so stack takes a single new column type
with self.assertRaises(ValueError):
sf.stack('a', 'a', new_column_type=[str, int])
sf.stack('a', 'a', new_column_type=str)
expected_sf = SFrame()
expected_sf['a'] = SArray([None] * 100 + ["a", "b"])
def test_stack_vector(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]
expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]
expected_result[result.column_names()[1]] = SArray([1,1,2,1,2,3,1,2,3,4], float)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
import array
sf = SFrame()
sf['a'] = SArray(([array.array('d')] * 100) + [array.array('d',[1.0,2.0])])
    # it's a vector column, so stack takes a single new column type
with self.assertRaises(ValueError):
sf.stack('a', 'a', new_column_type=[str, int])
sf.stack('a', 'a', new_column_type=int)
expected_sf = SFrame()
expected_sf['a'] = SArray([None] * 100 + [1, 2])
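  # unstack is the inverse operation: it groups the stacked rows back into a
  # single dict or list column, checked below via stack/unstack round-trips.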
def test_unstack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["categories"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
None]
stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)
# normal unstack
unstacked_sf = stacked_sf.unstack(column=['category', 'value'], new_column_name = 'categories')
# these frames are *almost* equal except user4 will be {} instead of None
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing new column name
unstacked_sf = stacked_sf.unstack(['category', 'value'])
self.assertEqual(len(unstacked_sf.column_names()), 3)
unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'})
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing column names
with self.assertRaises(KeyError):
stacked_sf.unstack(['category','value1'])
# wrong input
with self.assertRaises(TypeError):
stacked_sf.unstack(['category'])
# duplicate new column name
with self.assertRaises(RuntimeError):
unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')
def test_unstack_list(self):
sf = SFrame()
sf['a'] = [1,2,3,4]
sf['b'] = [range(10), range(20), range(30), range(50)]
stacked_sf = sf.stack('b', new_column_name = 'new_b')
unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
with self.assertRaises(RuntimeError):
stacked_sf.unstack('new_b', new_column_name='a')
with self.assertRaises(TypeError):
stacked_sf.unstack(['new_b'])
with self.assertRaises(KeyError):
stacked_sf.unstack('non exist')
def test_content_identifier(self):
sf = SFrame({"a":[1,2,3,4],"b":["1","2","3","4"]})
a1 = sf['a'].__get_content_identifier__()
a2 = sf['a'].__get_content_identifier__()
self.assertEquals(a1, a2)
def test_random_access(self):
t1 = list(range(0,100000))
t2 = [str(i) for i in t1]
t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))];
s = SFrame({'t1':t1,'t2':t2})
# simple slices
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))
self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))
self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))
self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))
self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))
# negative slices
self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))
self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))
self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))
self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912],t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10],t[-10])
    # edge case oddities
self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))
self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))
self.assertEqual(len(s[-1:-2]), 0)
self.assertEqual(len(s[-1:-1000:2]), 0)
with self.assertRaises(IndexError):
s[len(s)]
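  # helper for test_sort: builds an n-row SFrame with int/float/str/list columns
  # and checks sort() on single and multiple columns, lazy inputs, per-column
  # sort directions, and error handling for bad arguments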
def sort_n_rows(self, nrows=100):
nrows += 1
sf = SFrame()
sf['a'] = range(1, nrows)
sf['b'] = [float(i) for i in range(1,nrows)]
sf['c'] = [str(i) for i in range(1,nrows)]
sf['d'] = [[i, i+1] for i in range(1,nrows)]
reversed_sf = SFrame()
reversed_sf['a'] = range(nrows-1, 0, -1)
reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]
reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]
reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]
with self.assertRaises(TypeError):
sf.sort()
with self.assertRaises(TypeError):
sf.sort(1)
with self.assertRaises(TypeError):
sf.sort("d")
with self.assertRaises(ValueError):
sf.sort("nonexist")
with self.assertRaises(TypeError):
sf.sort({'a':True})
result = sf.sort('a')
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
# try a lazy input
result = sf[sf['a'] > 10].sort('a')
assert_frame_equal(sf[sf['a'] > 10].to_dataframe(), result.to_dataframe());
result = sf.sort('a', ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# lazy reversed
result = sf[sf['a'] > 10].sort('a', ascending = False)
assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe());
# lazy reversed
result = sf[sf['a'] > 10].sort('a', ascending = False)
assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe());
# sort two columns
result = sf.sort(['a', 'b'])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort(['a', 'c'], ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', True), ('b', False)])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', False), ('b', True)])
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# empty sort should not throw
sf = SFrame({'x':[]})
sf.sort('x')
def test_sort(self):
#self.sort_n_rows(100)
for i in range(1, 10):
self.sort_n_rows(i)
def test_dropna(self):
# empty case
sf = SFrame()
self.assertEquals(len(sf.dropna()), 0)
# normal case
self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())
test_split = self.employees_sf.dropna_split()
self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())
self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())
# create some other test sframe
test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),
'floats':SArray([np.nan,2.,3.,4.,np.nan],float),
'strs':SArray(['1',np.nan,'','4',None], str),
'lists':SArray([[1],None,[],[1,1,1,1],None], list),
'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})
# another normal, but more interesting case
self.__test_equal(test_sf.dropna(),
pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))
test_split = test_sf.dropna_split()
self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())
# the 'all' case
self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(how='all')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# select some columns
self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())
test_split = test_sf.dropna_split(['ints','floats'], how='all')
self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:1].append(test_sf[4:5]).to_dataframe())
self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split('strs')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(['strs','dicts'])
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# bad stuff
    with self.assertRaises(TypeError):
      test_sf.dropna(1)
    with self.assertRaises(TypeError):
      test_sf.dropna([1,2])
    with self.assertRaises(TypeError):
      test_sf.dropna('strs', how=1)
    with self.assertRaises(TypeError):
      test_sf.dropna_split(1)
    with self.assertRaises(TypeError):
      test_sf.dropna_split([1,2])
    with self.assertRaises(TypeError):
      test_sf.dropna_split('strs', how=1)
    with self.assertRaises(ValueError):
      test_sf.dropna('ints', how='blah')
    with self.assertRaises(ValueError):
      test_sf.dropna_split('ints', how='blah')
    with self.assertRaises(RuntimeError):
      test_sf.dropna('dontexist')
    with self.assertRaises(RuntimeError):
      test_sf.dropna_split('dontexist')
def test_add_row_number(self):
sf = SFrame(self.__create_test_df(400000))
sf = sf.add_row_number('id')
self.assertEquals(list(sf['id']), list(range(0,400000)))
del sf['id']
sf = sf.add_row_number('id', -20000)
self.assertEquals(list(sf['id']), list(range(-20000,380000)))
del sf['id']
sf = sf.add_row_number('id', 40000)
self.assertEquals(list(sf['id']), list(range(40000,440000)))
with self.assertRaises(RuntimeError):
sf.add_row_number('id')
    with self.assertRaises(TypeError):
      sf = sf.add_row_number(46)
    with self.assertRaises(TypeError):
      sf = sf.add_row_number('id2',start='hi')
def test_check_lazy_sframe_size(self):
# empty sframe, materialized, has_size
sf = SFrame()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# add one column, not materialized, has_size
sf['a'] = range(1000)
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# materialize it, materialized, has_size
sf['a'] = range(1000)
sf.__materialize__()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# logical filter, not materialized, not has_size
sf = sf[sf['a'] > 5000]
self.assertFalse(sf.__is_materialized__())
self.assertFalse(sf.__has_size__())
def test_lazy_logical_filter_sarray(self):
g=SArray(range(10000))
g2=SArray(range(10000))
a=g[g>10]
a2=g2[g>10]
z=a[a2>20]
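    # g > 10 keeps values 11..9999 (9989 rows); a2 > 20 then keeps 21..9999,
    # i.e. 9979 rows (the SFrame variant below relies on the same arithmetic)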
self.assertEqual(len(z), 9979)
def test_lazy_logical_filter_sframe(self):
g=SFrame({'a':range(10000)})
g2=SFrame({'a':range(10000)})
a=g[g['a']>10]
a2=g2[g['a']>10]
z=a[a2['a']>20]
self.assertEqual(len(z), 9979)
def test_sframe_to_rdd(self):
if not HAS_PYSPARK:
print("Did not run Pyspark unit tests!")
return
sc = SparkContext('local')
# Easiest case: single column of integers
test_rdd = sc.parallelize(range(100))
sf = SFrame.from_rdd(test_rdd)
    self.assertEqual(sf.num_cols(), 1)
    self.assertEqual(sf.column_names(), ['X1'])
# We cast integers to floats to be safe on varying types
self.assertEquals([float(i) for i in range(0,100)], list(sf['X1']))
sc.stop()
def test_rdd_to_sframe(self):
if not HAS_PYSPARK:
print("Did not run Pyspark unit tests!")
return
sc = SparkContext('local')
# Easiest case: single column of integers
sf = SFrame({'column_name':range(100)})
test_rdd = sf.to_rdd(sc)
res = test_rdd.collect()
self.assertEquals(res, [{'column_name':long(i)} for i in range(100)])
sc.stop()
def test_column_manipulation_of_lazy_sframe(self):
g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
del g['id']
# if lazy column deletion is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
g.swap_columns('a','id')
# if lazy column swap is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
def test_empty_sarray(self):
with util.TempDirectory() as f:
sf = SArray()
sf.save(f)
sf2 = SArray(f)
self.assertEquals(len(sf2), 0)
def test_empty_sframe(self):
with util.TempDirectory() as f:
sf = SFrame()
sf.save(f)
sf2 = SFrame(f)
self.assertEquals(len(sf2), 0)
self.assertEquals(sf2.num_columns(), 0)
def test_none_column(self):
sf = SFrame({'a':[1,2,3,4,5]})
sf['b'] = None
self.assertEqual(sf['b'].dtype(), float)
df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})
self.__test_equal(sf, df)
sa = SArray.from_const(None, 100)
self.assertEquals(list(sa), [None] * 100)
self.assertEqual(sa.dtype(), float)
def test_apply_with_partial(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
def concat_fn(character, row):
return '%s%d' % (character, row['a'])
my_partial_fn = functools.partial(concat_fn, 'x')
sa = sf.apply(my_partial_fn)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, row):
return '%s%d' % (self.character, row['a'])
concatenator = Concatenator('x')
sa = sf.apply(concatenator)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_save_sframe(self):
    '''Saving a lazily evaluated SFrame should not materialize it to the target folder
'''
data = SFrame()
data['x'] = range(100)
data['x'] = data['x'] > 50
#lazy and good
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
print(data)
def test_empty_argmax_does_not_fail(self):
# an empty argmax should not result in a crash
sf = SFrame({'id': [0, 0, 0, 1, 1, 2, 2],
'value': [3.0, 2.0, 2.3, None, None, 4.3, 1.3],
'category': ['A', 'B', 'A', 'E', 'A', 'A', 'B']})
sf.groupby('id', aggregate.ARGMAX('value', 'category'))
def test_cache_invalidation(self):
# Changes to the SFrame should invalidate the indexing cache.
X = SFrame({'a' : range(4000),
'b' : range(4000)})
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : i, 'b' : i})
X['a'] = range(1000, 5000)
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'b' : i})
del X['b']
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i})
X['b'] = X['a']
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'b' : 1000 + i})
X.rename({'b' : 'c'})
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'c' : 1000 + i})
def test_to_numpy(self):
X = SFrame({'a' : range(100),
'b' : range(100)})
import numpy as np
import numpy.testing as nptest
Y = np.transpose(np.array([range(100), range(100)]))
nptest.assert_array_equal(X.to_numpy(), Y)
X['b'] = X['b'].astype(str)
s = [str(i) for i in range(100)]
Y = np.transpose(np.array([s, s]))
nptest.assert_array_equal(X.to_numpy(), Y)
@mock.patch(__name__+'.sqlite3.Cursor', spec=True)
@mock.patch(__name__+'.sqlite3.Connection', spec=True)
def test_from_sql(self, mock_conn, mock_cursor):
# Set up mock connection and cursor
conn = mock_conn('example.db')
curs = mock_cursor()
conn.cursor.return_value = curs
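    # fetchone/fetchmany on the mocked cursor are patched below to serve rows
    # from sf_data, so SFrame.from_sql can be exercised without a real database;
    # the type codes placed in curs.description feed type inference in some cases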
sf_type_codes = [44,44,41,22,114,199,43]
sf_data = list(zip(*self.all_type_cols))
sf_iter = sf_data.__iter__()
def mock_fetchone():
try:
return next(sf_iter)
except StopIteration:
return None
def mock_fetchmany(size=1):
count = 0
ret_list = []
for i in sf_iter:
if count == curs.arraysize:
break
ret_list.append(i)
count += 1
return ret_list
curs.fetchone.side_effect = mock_fetchone
curs.fetchmany.side_effect = mock_fetchmany
curs.description = [['X'+str(i+1),sf_type_codes[i]]+[None for j in range(5)] for i in range(len(sf_data[0]))]
# bigger than cache, no Nones
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, self.sf_all_types)
# smaller than cache, no Nones
sf_iter = sf_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=100, dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, self.sf_all_types)
none_col = [None for i in range(5)]
nones_in_cache = list(zip(*[none_col for i in range(len(sf_data[0]))]))
none_sf = SFrame({'X'+str(i):none_col for i in range(1,len(sf_data[0])+1)})
test_data = (nones_in_cache+sf_data)
sf_iter = test_data.__iter__()
# more None rows than cache & types in description
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
sf_inferred_types = SFrame()
expected_types = [float,float,str,str,str,str,dt.datetime]
for i in zip(self.sf_all_types.column_names(),expected_types):
new_col = SArray(none_col).astype(i[1])
new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: i[1](x) if i[1] is not dt.datetime else x))
sf_inferred_types.add_column(new_col)
# Don't test the string representation of dict and list; there are
# funky consistency issues with the string representations of these
sf.remove_columns(['X5', 'X6'])
sf_inferred_types.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
# more None rows than cache & no type information
for i in range(len(curs.description)):
curs.description[i][1] = None
sf_iter = test_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
sf_inferred_types = SFrame()
expected_types = [str for i in range(len(sf_data[0]))]
for i in zip(self.sf_all_types.column_names(),expected_types):
new_col = SArray(none_col).astype(i[1])
new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: str(x)))
sf_inferred_types.add_column(new_col)
# Don't test the string representation of dict, could be out of order
sf.remove_columns(['X5', 'X6'])
sf_inferred_types.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
### column_type_hints tests
sf_iter = test_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5,
dbapi_module=dbapi2_mock(), column_type_hints=str)
sf.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
# Provide unhintable types
sf_iter = test_data.__iter__()
expected_types = [int,float,str,array.array,list,dict,dt.datetime]
with self.assertRaises(TypeError):
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=5,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
sf_iter = test_data.__iter__()
expected_types = {'X'+str(i+1):expected_types[i] for i in range(3)}
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=10,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
_assert_sframe_equal(sf[5:],self.sf_all_types)
# Test a float forced to a str
sf_iter = test_data.__iter__()
expected_types['X2'] = str
self.sf_all_types['X2'] = self.sf_all_types['X2'].apply(lambda x: str(x))
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=10,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
_assert_sframe_equal(sf[5:],self.sf_all_types)
# Type unsupported by sframe
curs.description = [['X1',44],['X2',44]]
sf_iter = [[complex(4.5,3),1], [complex(3.4,5),2]].__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table")
expected_sf = SFrame({'X1':["(4.5+3j)","(3.4+5j)"],'X2':[1,2]})
_assert_sframe_equal(sf, expected_sf)
# bad DBAPI version!
bad_version = dbapi2_mock()
bad_version.apilevel = "1.0 "
with self.assertRaises(NotImplementedError):
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=bad_version)
# Bad module
with self.assertRaises(AttributeError):
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=os)
# Bad connection
with self.assertRaises(AttributeError):
sf = SFrame.from_sql(4, "SELECT * FROM test_table")
# Empty query result
curs.description = []
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, SFrame())
@mock.patch(__name__+'.sqlite3.Cursor', spec=True)
@mock.patch(__name__+'.sqlite3.Connection', spec=True)
def test_to_sql(self, mock_conn, mock_cursor):
conn = mock_conn('example.db')
curs = mock_cursor()
insert_stmt = "INSERT INTO ins_test (X1,X2,X3,X4,X5,X6,X7) VALUES ({0},{1},{2},{3},{4},{5},{6})"
num_cols = len(self.sf_all_types.column_names())
test_cases = [
('qmark',insert_stmt.format(*['?' for i in range(num_cols)])),
('numeric',insert_stmt.format(*[':'+str(i) for i in range(1,num_cols+1)])),
('named',insert_stmt.format(*[':X'+str(i) for i in range(1,num_cols+1)])),
('format',insert_stmt.format(*['%s' for i in range(num_cols)])),
('pyformat',insert_stmt.format(*['%(X'+str(i)+')s' for i in range(1,num_cols+1)])),
]
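    # each DBAPI paramstyle yields a differently parameterized INSERT statement;
    # the loop below checks that to_sql builds the matching statement, executes
    # it once per row, then commits and closes the cursor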
for i in test_cases:
conn.cursor.return_value = curs
mock_mod = dbapi2_mock()
mock_mod.paramstyle = i[0]
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=mock_mod)
conn.cursor.assert_called_once_with()
calls = []
col_names = self.sf_all_types.column_names()
for j in self.sf_all_types:
if i[0] == 'named' or i[0] == 'pyformat':
calls.append(mock.call(i[1],j))
else:
calls.append(mock.call(i[1],[j[k] for k in col_names]))
curs.execute.assert_has_calls(calls, any_order=False)
self.assertEquals(curs.execute.call_count, len(self.sf_all_types))
conn.commit.assert_called_once_with()
curs.close.assert_called_once_with()
conn.reset_mock()
curs.reset_mock()
# bad DBAPI version!
bad_version = dbapi2_mock()
bad_version.apilevel = "1.0 "
with self.assertRaises(NotImplementedError):
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=bad_version)
# bad paramstyle
bad_paramstyle = dbapi2_mock()
bad_paramstyle.paramstyle = 'foo'
with self.assertRaises(TypeError):
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=bad_paramstyle)
def test_materialize(self):
sf = SFrame({'a':range(100)})
sf = sf[sf['a'] > 10]
self.assertFalse(sf.is_materialized())
sf.materialize()
self.assertTrue(sf.is_materialized())
def test_materialization_slicing(self):
# Has been known to fail.
g=SFrame({'a':range(100)})[:10]
g['b'] = g['a'] + 1
g['b'].materialize()
g.materialize()
def test_copy(self):
from copy import copy
sf = generate_random_sframe(100, "Cns")
sf_copy = copy(sf)
assert sf is not sf_copy
_assert_sframe_equal(sf, sf_copy)
def test_deepcopy(self):
from copy import deepcopy
sf = generate_random_sframe(100, "Cns")
sf_copy = deepcopy(sf)
assert sf is not sf_copy
_assert_sframe_equal(sf, sf_copy)
if __name__ == "__main__":
import sys
# Check if we are supposed to connect to another server
for i, v in enumerate(sys.argv):
if v.startswith("ipc://"):
_launch(v)
# The rest of the arguments need to get passed through to
# the unittest module
del sys.argv[i]
break
unittest.main()
| bsd-3-clause |
ishay2b/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 23 | 5276 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/extension/base/methods.py | 1 | 18343 | import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_bool_dtype
import pandas as pd
import pandas._testing as tm
from pandas.core.sorting import nargsort
from .base import BaseExtensionTests
class BaseMethodsTests(BaseExtensionTests):
"""Various Series and DataFrame methods."""
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
def test_value_counts_with_normalize(self, data):
# GH 33172
data = data[:10].unique()
values = np.array(data[~data.isna()])
result = (
pd.Series(data, dtype=data.dtype).value_counts(normalize=True).sort_index()
)
expected = pd.Series([1 / len(values)] * len(values), index=result.index)
self.assert_series_equal(result, expected)
def test_count(self, data_missing):
df = pd.DataFrame({"A": data_missing})
result = df.count(axis="columns")
expected = pd.Series([0, 1])
self.assert_series_equal(result, expected)
def test_series_count(self, data_missing):
# GH#26835
ser = pd.Series(data_missing)
result = ser.count()
expected = 1
assert result == expected
def test_apply_simple_series(self, data):
result = pd.Series(data).apply(id)
assert isinstance(result, pd.Series)
def test_argsort(self, data_for_sorting):
result = pd.Series(data_for_sorting).argsort()
expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))
self.assert_series_equal(result, expected)
def test_argsort_missing_array(self, data_missing_for_sorting):
result = data_missing_for_sorting.argsort()
expected = np.array([2, 0, 1], dtype=np.dtype("int"))
# we don't care whether it's int32 or int64
result = result.astype("int64", casting="safe")
expected = expected.astype("int64", casting="safe")
tm.assert_numpy_array_equal(result, expected)
def test_argsort_missing(self, data_missing_for_sorting):
result = pd.Series(data_missing_for_sorting).argsort()
expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
self.assert_series_equal(result, expected)
def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
# GH 24382
# data_for_sorting -> [B, C, A] with A < B < C
assert data_for_sorting.argmax() == 1
assert data_for_sorting.argmin() == 2
        # with repeated values -> first occurrence
data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
assert data.argmax() == 3
assert data.argmin() == 0
# with missing values
# data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
assert data_missing_for_sorting.argmax() == 0
assert data_missing_for_sorting.argmin() == 2
@pytest.mark.parametrize("method", ["argmax", "argmin"])
def test_argmin_argmax_empty_array(self, method, data):
# GH 24382
err_msg = "attempt to get"
with pytest.raises(ValueError, match=err_msg):
getattr(data[:0], method)()
@pytest.mark.parametrize("method", ["argmax", "argmin"])
def test_argmin_argmax_all_na(self, method, data, na_value):
        # all missing with skipna=True is the same as empty
err_msg = "attempt to get"
data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype)
with pytest.raises(ValueError, match=err_msg):
getattr(data_na, method)()
@pytest.mark.parametrize(
"na_position, expected",
[
("last", np.array([2, 0, 1], dtype=np.dtype("intp"))),
("first", np.array([1, 2, 0], dtype=np.dtype("intp"))),
],
)
def test_nargsort(self, data_missing_for_sorting, na_position, expected):
# GH 25439
result = nargsort(data_missing_for_sorting, na_position=na_position)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
expected = ser.iloc[[2, 0, 1]]
if not ascending:
# GH 35922. Expect stable sort
if ser.nunique() == 2:
expected = ser.iloc[[0, 1, 2]]
else:
expected = ser.iloc[[1, 0, 2]]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
self, data_missing_for_sorting, ascending, sort_by_key
):
ser = pd.Series(data_missing_for_sorting)
result = ser.sort_values(ascending=ascending, key=sort_by_key)
if ascending:
expected = ser.iloc[[2, 0, 1]]
else:
expected = ser.iloc[[0, 2, 1]]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_frame(self, data_for_sorting, ascending):
df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
result = df.sort_values(["A", "B"])
expected = pd.DataFrame(
{"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]
)
self.assert_frame_equal(result, expected)
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
duplicated = box(data._from_sequence([data[0], data[0]]))
result = method(duplicated)
assert len(result) == 1
assert isinstance(result, type(data))
assert result[0] == duplicated[0]
@pytest.mark.parametrize("na_sentinel", [-1, -2])
def test_factorize(self, data_for_grouping, na_sentinel):
codes, uniques = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
expected_codes = np.array(
[0, 0, na_sentinel, na_sentinel, 1, 1, 0, 2], dtype=np.intp
)
expected_uniques = data_for_grouping.take([0, 4, 7])
tm.assert_numpy_array_equal(codes, expected_codes)
self.assert_extension_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize("na_sentinel", [-1, -2])
def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
codes_1, uniques_1 = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
codes_2, uniques_2 = data_for_grouping.factorize(na_sentinel=na_sentinel)
tm.assert_numpy_array_equal(codes_1, codes_2)
self.assert_extension_array_equal(uniques_1, uniques_2)
assert len(uniques_1) == len(pd.unique(uniques_1))
assert uniques_1.dtype == data_for_grouping.dtype
def test_factorize_empty(self, data):
codes, uniques = pd.factorize(data[:0])
expected_codes = np.array([], dtype=np.intp)
expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
tm.assert_numpy_array_equal(codes, expected_codes)
self.assert_extension_array_equal(uniques, expected_uniques)
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.A.values is not result.A.values
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values is arr
def test_fillna_length_mismatch(self, data_missing):
msg = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg):
data_missing.fillna(data_missing.take([1]))
def test_combine_le(self, data_repeated):
# GH 20825
# Test that combine works when doing a <= (le) comparison
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series(
[a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series([a <= val for a in list(orig_data1)])
self.assert_series_equal(result, expected)
def test_combine_add(self, data_repeated):
# GH 20825
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
with np.errstate(over="ignore"):
expected = pd.Series(
orig_data1._from_sequence(
[a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 + x2)
expected = pd.Series(
orig_data1._from_sequence([a + val for a in list(orig_data1)])
)
self.assert_series_equal(result, expected)
def test_combine_first(self, data):
# https://github.com/pandas-dev/pandas/issues/24147
a = pd.Series(data[:3])
b = pd.Series(data[2:5], index=[2, 3, 4])
result = a.combine_first(b)
expected = pd.Series(data[:5])
self.assert_series_equal(result, expected)
@pytest.mark.parametrize("frame", [True, False])
@pytest.mark.parametrize(
"periods, indices",
[(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])],
)
def test_container_shift(self, data, frame, periods, indices):
# https://github.com/pandas-dev/pandas/issues/22386
subset = data[:5]
data = pd.Series(subset, name="A")
expected = pd.Series(subset.take(indices, allow_fill=True), name="A")
if frame:
result = data.to_frame(name="A").assign(B=1).shift(periods)
expected = pd.concat(
[expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1
)
compare = self.assert_frame_equal
else:
result = data.shift(periods)
compare = self.assert_series_equal
compare(result, expected)
def test_shift_0_periods(self, data):
# GH#33856 shifting with periods=0 should return a copy, not same obj
result = data.shift(0)
assert data[0] != data[1] # otherwise below is invalid
data[0] = data[1]
assert result[0] != result[1] # i.e. not the same object/view
@pytest.mark.parametrize("periods", [1, -2])
def test_diff(self, data, periods):
data = data[:5]
if is_bool_dtype(data.dtype):
op = operator.xor
else:
op = operator.sub
try:
# does this array implement ops?
op(data, data)
except Exception:
pytest.skip(f"{type(data)} does not support diff")
s = pd.Series(data)
result = s.diff(periods)
expected = pd.Series(op(data, data.shift(periods)))
self.assert_series_equal(result, expected)
df = pd.DataFrame({"A": data, "B": [1.0] * 5})
result = df.diff(periods)
if periods == 1:
b = [np.nan, 0, 0, 0, 0]
else:
b = [0, 0, 0, np.nan, np.nan]
expected = pd.DataFrame({"A": expected, "B": b})
self.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"periods, indices",
[[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]],
)
def test_shift_non_empty_array(self, data, periods, indices):
# https://github.com/pandas-dev/pandas/issues/23911
subset = data[:2]
result = subset.shift(periods)
expected = subset.take(indices, allow_fill=True)
self.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4])
def test_shift_empty_array(self, data, periods):
# https://github.com/pandas-dev/pandas/issues/23911
empty = data[:0]
result = empty.shift(periods)
expected = empty
self.assert_extension_array_equal(result, expected)
def test_shift_zero_copies(self, data):
result = data.shift(0)
assert result is not data
result = data[:0].shift(2)
assert result is not data
def test_shift_fill_value(self, data):
arr = data[:4]
fill_value = data[0]
result = arr.shift(1, fill_value=fill_value)
expected = data.take([0, 0, 1, 2])
self.assert_extension_array_equal(result, expected)
result = arr.shift(-2, fill_value=fill_value)
expected = data.take([2, 3, 0, 0])
self.assert_extension_array_equal(result, expected)
def test_not_hashable(self, data):
# We are in general mutable, so not hashable
with pytest.raises(TypeError, match="unhashable type"):
hash(data)
def test_hash_pandas_object_works(self, data, as_frame):
# https://github.com/pandas-dev/pandas/issues/23066
data = pd.Series(data)
if as_frame:
data = data.to_frame()
a = pd.util.hash_pandas_object(data)
b = pd.util.hash_pandas_object(data)
self.assert_equal(a, b)
def test_searchsorted(self, data_for_sorting, as_series):
b, c, a = data_for_sorting
arr = type(data_for_sorting)._from_sequence([a, b, c])
if as_series:
arr = pd.Series(arr)
assert arr.searchsorted(a) == 0
assert arr.searchsorted(a, side="right") == 1
assert arr.searchsorted(b) == 1
assert arr.searchsorted(b, side="right") == 2
assert arr.searchsorted(c) == 2
assert arr.searchsorted(c, side="right") == 3
result = arr.searchsorted(arr.take([0, 2]))
expected = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
# sorter
sorter = np.array([1, 2, 0])
assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
def test_where_series(self, data, na_value, as_frame):
assert data[0] != data[1]
cls = type(data)
a, b = data[:2]
ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
cond = np.array([True, True, False, False])
if as_frame:
ser = ser.to_frame(name="a")
cond = cond.reshape(-1, 1)
result = ser.where(cond)
expected = pd.Series(
cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)
)
if as_frame:
expected = expected.to_frame(name="a")
self.assert_equal(result, expected)
# array other
cond = np.array([True, False, True, True])
other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
if as_frame:
other = pd.DataFrame({"a": other})
cond = pd.DataFrame({"a": cond})
result = ser.where(cond, other)
expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
if as_frame:
expected = expected.to_frame(name="a")
self.assert_equal(result, expected)
@pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
def test_repeat(self, data, repeats, as_series, use_numpy):
arr = type(data)._from_sequence(data[:3], dtype=data.dtype)
if as_series:
arr = pd.Series(arr)
result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats)
repeats = [repeats] * 3 if isinstance(repeats, int) else repeats
expected = [x for x, n in zip(arr, repeats) for _ in range(n)]
expected = type(data)._from_sequence(expected, dtype=data.dtype)
if as_series:
expected = pd.Series(expected, index=arr.index.repeat(repeats))
self.assert_equal(result, expected)
@pytest.mark.parametrize(
"repeats, kwargs, error, msg",
[
(2, {"axis": 1}, ValueError, "axis"),
(-1, {}, ValueError, "negative"),
([1, 2], {}, ValueError, "shape"),
(2, {"foo": "bar"}, TypeError, "'foo'"),
],
)
def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):
with pytest.raises(error, match=msg):
if use_numpy:
np.repeat(data, repeats, **kwargs)
else:
data.repeat(repeats, **kwargs)
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box):
data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype)
data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype)
data = tm.box_expected(data, box, transpose=False)
data2 = tm.box_expected(data2, box, transpose=False)
data_na = tm.box_expected(data_na, box, transpose=False)
# we are asserting with `is True/False` explicitly, to test that the
# result is an actual Python bool, and not something "truthy"
assert data.equals(data) is True
assert data.equals(data.copy()) is True
# unequal other data
assert data.equals(data2) is False
assert data.equals(data_na) is False
# different length
assert data[:2].equals(data[:3]) is False
        # empty are equal
assert data[:0].equals(data[:0]) is True
# other types
assert data.equals(None) is False
assert data[[0]].equals(data[0]) is False
| gpl-2.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/frame/test_period.py | 18 | 5005 | import numpy as np
from numpy.random import randn
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import (PeriodIndex, period_range, DataFrame, date_range,
Index, to_datetime, DatetimeIndex)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5, name='index')
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == 'Index'
assert rng.name == 'index'
rs = df.reset_index().set_index('index')
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
tm.assert_index_equal(result.index, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
tm.assert_index_equal(result.columns, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
# invalid axis
tm.assert_raises_regex(
ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
assert isinstance(result1.columns, DatetimeIndex)
assert isinstance(result2.columns, DatetimeIndex)
tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
        # PeriodIndex.to_timestamp always uses 'infer'
assert result1.columns.freqstr == 'AS-JAN'
assert result2.columns.freqstr == 'AS-JAN'
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
| agpl-3.0 |
andre-nguyen/rpg_svo | svo_analysis/src/svo_analysis/analyse_dataset.py | 17 | 1178 | # -*- coding: utf-8 -*-
import associate
import numpy as np
import matplotlib.pyplot as plt
import yaml
def loadDataset(filename):
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
return D
dataset_dir = '/home/cforster/Datasets/SlamBenchmark/px4_r2'
trajectory_data = dataset_dir+'/groundtruth.txt'
stepsize = 10
# load dataset
data = loadDataset(trajectory_data)
n = data.shape[0]
steps = np.arange(0,n,stepsize)
# compute trajectory length
last_pos = data[0,1:4]
trajectory_length = 0
for i in steps[1:]:
new_pos = data[i,1:4]
trajectory_length += np.linalg.norm(new_pos-last_pos)
last_pos = new_pos
print 'trajectory length = ' + str(trajectory_length) + 'm'
print 'height mean = ' + str(np.mean(data[:,3])) + 'm'
print 'height median = ' + str(np.median(data[:,3])) + 'm'
print 'height std = ' + str(np.std(data[:,3])) + 'm'
print 'duration = ' + str(data[-1,0]-data[0,0]) + 's'
print 'speed = ' + str(trajectory_length/(data[-1,0]-data[0,0])) + 'm/s'
| gpl-3.0 |
hamogu/filili | filili/findlines.py | 1 | 7501 | import time
import numpy as np
try:
import matplotlib.pyplot as plt
has_mpl = True
except ImportError:
has_mpl = False
print 'Warning: findlines.py: No matplotlib found'
print ' Some plotting routines are disabled'
#from sigma_clip import sigma_clipping
# TBD: Should this be separated in code that requires sherpa and code that does not?
import sherpa.astro.ui as ui
import shmodelshelper as smh
def maximum_filter_noscipy(input, size):
'''reimplement scipy.ndimage.maximum_filter1d
    This implementation is in pure python for compatibility in case
    scipy is not available. The scipy version is written in C and should
    be faster for larger arrays.
    This procedure implements only a subset of the options from the
    scipy version.
Calculate a one-dimensional maximum filter along a 1-d array.
Parameters
----------
input : array-like
input array to filter
size : int
length along which to calculate 1D maximum
'''
if input.ndim != 1:
raise ValueError('Input array must have exactly one dimension')
maxfilter = np.zeros_like(input)
for i in range(input.size):
        maxfilter[i] = np.max(input[max(0, i-size):min(i+size, input.size)])
return maxfilter
try:
from scipy.ndimage import maximum_filter1d
except ImportError:
maximum_filter1d = maximum_filter_noscipy
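# prefer scipy's C implementation of the 1-d maximum filter; fall back to the
# pure-python version above when scipy is unavailable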
def findlines(x, y, fwhm, smoothwindow = 'hanning', sigma_threshold = 3.):
'''
    Several approaches are tried here and it is not yet clear which will turn out to be useful
- smoothing: show real peaks and not just noise
- maximum_filter = array: will find the peaks
- sigma_clipping = are the peaks large enough to be relevant?
Parameters
----------
x : ndarray
x values, e.g. wavelength
y : ndarray
y values, e.g. flux or res_flux / error
fwhm : float
estimate for FWHM of lines. Used as smoothing scale
smoothwindow : string or None
        if `smoothwindow` is one of `['flat', 'hanning', 'hamming',
        'bartlett', 'blackman']` a corresponding window function
will be used to smooth the signal before line detection.
Returns
-------
peaks : ndarray
index numbers for peaks found
'''
fwhminpix = int(fwhm / np.diff(x).mean())
if smoothwindow is not None:
#print smoothwindow
#print fwhminpix
y = smooth(y, window_len = 3*fwhminpix, window = smoothwindow)
maxindex = (maximum_filter1d(y, max(fwhminpix,3)) == y)
maxindex = maxindex & (y > (y.mean() + sigma_threshold * y.std()))
# sigma_clipping works only if there is plenty of continuum
#clipped_y = sigma_clipping(y, threshold = sigma_threshold)
# believe only peaks which are so large, that the get clipped by sigma_clipping
#maxindex = maxindex & (clipped_y.mask == False)
return np.flatnonzero(maxindex)
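# Illustrative usage sketch (the variable names below are hypothetical, not
# defined in this module):
#   peaks = findlines(wave, flux / error, fwhm=0.5)
#   line_positions = wave[peaks]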
def smooth(x,window_len=11, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
Parameters
----------
x: ndarray
the input signal
window_len: integer , optional
The dimension of the smoothing window; should be an odd integer
window: string, optional
The type of window from `['flat', 'hanning', 'hamming', 'bartlett',
'blackman']`. A 'flat' window will produce a moving average
smoothing.
Returns
-------
y : ndarray
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
See also
--------
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
    TODO: the window parameter could be the window itself (an array) instead of a string
    TODO: the window length could accept an even number
from http://www.scipy.org/Cookbook/SignalSmooth
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
# make it an odd number, so that reflection of values is same on each side
if np.mod(window_len,2) != 1:
window_len +=1
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=np.r_[x[(window_len-1)/2:0:-1],x,x[-1:-window_len/2:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y
smoothwindow = 'hanning'
sigma_threshold = 2.
def mainloop(mymodel, fwhm, id = None, maxiter = 5, mindist = 0., do_plots = 0):
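    '''Iteratively add line components to `mymodel` and refit.

    Rough flow (as implemented below): fit the current model, search the
    residual/error spectrum for peaks with `findlines`, add a line for each
    peak that lies at least `mindist` from existing line positions, and stop
    once an iteration adds no new lines or `maxiter` is reached.
    '''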
if id is None:
id = ui.get_default_id()
data = ui.get_data(id)
wave = data.get_indep()[0]
error = data.get_error()[0]
    # model could have been initialized with arbitrary values
ui.fit(id)
for i in range(maxiter):
oldmodel = smh.get_model_parts(id)
res_flux = ui.get_resid_plot(id).y
        fwhminpix = int(fwhm / np.diff(wave).mean())
        if smoothwindow is not None:
            y = smooth(res_flux/error, window_len = 3*fwhminpix, window = smoothwindow)
        else:
            y = res_flux/error
peaks = findlines(wave, y, fwhm, smoothwindow = None, sigma_threshold = sigma_threshold)
if has_mpl and (do_plots > 2):
plt.figure()
plt.plot(wave, res_flux/error, 's')
for pos in mymodel.line_value_list('pos'):
plt.plot([pos, pos], plt.ylim(),'k:')
for peak in peaks:
plt.plot([wave[peak], wave[peak]], plt.ylim())
plt.plot(wave, y)
plt.draw()
for peak in peaks:
if (len(mymodel.line_value_list('pos')) == 0) or (min(np.abs(mymodel.line_value_list('pos') - wave[peak])) >= mindist):
mymodel.add_line(**mymodel.guess(wave, smooth(res_flux, window_len = 3*fwhminpix, window = smoothwindow), peak, fwhm = fwhm))
newmodel = smh.get_model_parts(id)
print 'Iteration {0:3n}: {1:3n} lines added'.format(i, len(newmodel) - len(oldmodel))
if set(newmodel) == set(oldmodel):
print 'No new lines added this step - fitting finished'
break
# Now do the fitting in Sherpa
#ui.set_method('simplex')
ui.fit(id)
#ui.set_method('moncar')
#ui.fit(id)
if has_mpl and (do_plots > 0):
if do_plots > 1:
plt.figure()
else:
plt.clf()
ui.plot_fit(id)
for pos in mymodel.line_value_list('pos'):
plt.plot([pos, pos], plt.ylim(),'k:')
for peak in peaks:
plt.plot([wave[peak], wave[peak]], plt.ylim())
plt.plot(wave, res_flux)
plt.draw()
else:
print 'Max number of iterations reached'
#model.cleanup() #remove lines running to 0 etc.
return mymodel
| mit |
vortex-ape/scikit-learn | sklearn/covariance/tests/test_graphical_lasso.py | 4 | 6553 | """ Test the graphical_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
import pytest
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_warns_message
from sklearn.covariance import (graphical_lasso, GraphicalLasso,
GraphicalLassoCV, empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
from sklearn.utils.fixes import PY3_OR_LATER
from numpy.testing import assert_equal
def test_graphical_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graphical_lasso(emp_cov, return_costs=True,
alpha=alpha, mode=method)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphicalLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphicalLasso(
assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graphical_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# (need to set penalize.diagonal to FALSE)
cov_R = np.array([
[0.68112222, 0.0000000, 0.265820, 0.02464314],
[0.00000000, 0.1887129, 0.000000, 0.00000000],
[0.26582000, 0.0000000, 3.095503, 0.28697200],
[0.02464314, 0.0000000, 0.286972, 0.57713289]
])
icov_R = np.array([
[1.5190747, 0.000000, -0.1304475, 0.0000000],
[0.0000000, 5.299055, 0.0000000, 0.0000000],
[-0.1304475, 0.000000, 0.3498624, -0.1683946],
[0.0000000, 0.000000, -0.1683946, 1.8164353]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graphical_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_graphical_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphicalLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
@pytest.mark.skipif(not PY3_OR_LATER,
                    reason='On Python 2 DeprecationWarning is not issued for some unknown reason.')
def test_deprecated_grid_scores(random_state=1):
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
graphical_lasso = GraphicalLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1)
graphical_lasso.fit(X)
depr_message = ("Attribute grid_scores was deprecated in version "
"0.19 and will be removed in 0.21. Use "
"``grid_scores_`` instead")
with pytest.warns(DeprecationWarning, match=depr_message):
assert_equal(graphical_lasso.grid_scores, graphical_lasso.grid_scores_)
| bsd-3-clause |
quheng/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = est.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
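    # Added note: mdi_importance() below evaluates the asymptotic MDI expression of
    # Louppe et al. (2013),
    #   Imp(X_m) = sum_{k=0}^{p-1} 1 / (C(p, k) * (p - k))
    #              * sum_{B subset of V \ {X_m}, |B| = k} I(X_m; Y | B),
    # where each conditional mutual information term is estimated empirically as
    #   sum_b P(B=b) * (H(Y | B=b) - sum_i P(X_m=x_i | B=b) * H(Y | B=b, X_m=x_i)).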
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
    # Test that leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| bsd-3-clause |
wlamond/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
gtesei/fast-furious | competitions/quora-question-pairs/xgb2.py | 1 | 3082 | import numpy as np
import pandas as pd
import xgboost as xgb
import datetime
import operator
from sklearn.cross_validation import train_test_split
from collections import Counter
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from pylab import plot, show, subplot, specgram, imshow, savefig
RS = 12357
ROUNDS = 10000
print("Started")
np.random.seed(RS)
input_folder = './data/'
# data
df_train = pd.read_csv(input_folder + 'train.csv')
df_test = pd.read_csv(input_folder + 'test.csv')
print("Original data: X_train: {}, X_test: {}".format(df_train.shape, df_test.shape))
x_train_1 = pd.read_csv('xtrain.csv')
del x_train_1['Unnamed: 0']
x_test_1 = pd.read_csv('xtest.csv')
del x_test_1['Unnamed: 0']
print("Feature set 1: X_train: {}, X_test: {}".format(x_train_1.shape,x_test_1.shape))
x_train_2 = pd.read_csv('xtrain_2.csv')
#del x_train_2['Unnamed: 0']
x_test_2 = pd.read_csv('xtest_2.csv')
#del x_test_2['Unnamed: 0']
print("Feature set 2: X_train: {}, X_test: {}".format(x_train_2.shape, x_test_2.shape))
y_train = df_train['is_duplicate'].values
x_train = pd.concat([x_train_1,x_train_2],axis=1)
x_test = pd.concat([x_test_1,x_test_2],axis=1)
print("Merge: X_train: {}, X_test: {}".format(x_train.shape, x_test.shape))
assert x_train.shape[0] == df_train.shape[0]
assert x_test.shape[0] == df_test.shape[0]
# resample
if 1: # Now we oversample the negative class - at your own risk of overfitting!
pos_train = x_train[y_train == 1]
neg_train = x_train[y_train == 0]
print("Oversampling started for proportion: {}".format(len(pos_train) / (len(pos_train) + len(neg_train))))
p = 0.165
scale = ((len(pos_train) / (len(pos_train) + len(neg_train))) / p) - 1
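    # Added note: the doubling/truncation scheme below only approximately reaches the
    # target proportion p; an exact rebalancing would duplicate the negatives by
    # m = (r / (1 - r)) * ((1 - p) / p), with r the current positive share.
    # The print statement after the loop reports the proportion actually achieved.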
while scale > 1:
neg_train = pd.concat([neg_train, neg_train])
scale -=1
neg_train = pd.concat([neg_train, neg_train[:int(scale * len(neg_train))]])
print("Oversampling done, new proportion: {}".format(len(pos_train) / (len(pos_train) + len(neg_train))))
x_train = pd.concat([pos_train, neg_train])
y_train = (np.zeros(len(pos_train)) + 1).tolist() + np.zeros(len(neg_train)).tolist()
del pos_train, neg_train
# XGB
params = {}
params['objective'] = 'binary:logistic'
params['eval_metric'] = 'logloss'
params['eta'] = 0.01
params['max_depth'] = 5
params['silent'] = 1
params['seed'] = RS
print("Will train XGB for {} rounds, RandomSeed: {}".format(ROUNDS, RS))
x, X_val, ytrain, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=RS)
print("Training data: X_train: {}, Y_train: {}, X_test: {}".format(x_train.shape, len(y_train), x_test.shape))
xg_train = xgb.DMatrix(x, label=ytrain)
xg_val = xgb.DMatrix(X_val, label=y_val)
watchlist = [(xg_train,'train'), (xg_val,'eval')]
clf = xgb.train(params=params,dtrain=xg_train,num_boost_round=ROUNDS,early_stopping_rounds=200,evals=watchlist)
preds = clf.predict(xgb.DMatrix(x_test))
print("Writing output...")
sub = pd.DataFrame()
sub['test_id'] = df_test['test_id']
sub['is_duplicate'] = preds
sub.to_csv("xgb_feat_seed_2{}_n{}.csv".format(RS, ROUNDS), index=False)
print("Done.")
| mit |
BrettJSettle/MotilityTracking | tifffile.py | 1 | 122080 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2013, Christoph Gohlke
# Copyright (c) 2008-2013, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and meta-data can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
ImageJ, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG/CCITT compressed image data or EXIF/IPTC/GPS/XMP
meta-data is not implemented. Only primary info records are read for STK,
FluoView, and NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SEQ, GEL,
and OME-TIFF, are custom extensions defined by MetaMorph, Carl Zeiss
MicroImaging, Olympus, Media Cybernetics, Molecular Dynamics, and the Open
Microscopy Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2013.05.02
Requirements
------------
* `CPython 2.7 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Matplotlib 1.2 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.01.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats <http://www.cellprofiler.org/>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis, for a bug fix and some read_cz_lsm functions.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(5) BioFormats. http://www.loci.wisc.edu/ome/formats.html
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) TiffDecoder.java
http://rsbweb.nih.gov/ij/developer/source/ij/io/TiffDecoder.java.html
(8) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
Examples
--------
>>> data = numpy.random.rand(301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> assert numpy.all(image == data)
>>> tif = TiffFile('test.tif')
>>> images = tif.asarray()
>>> image0 = tif[0].asarray()
>>> for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
... if page.is_rgb: pass
... if page.is_palette:
... t = page.color_map
... if page.is_stk:
... t = page.mm_uic_tags.number_planes
... if page.is_lsm:
... t = page.cz_lsm_info
>>> tif.close()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import struct
import warnings
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as ElementTree
import numpy
__version__ = '2013.05.02'
__docformat__ = 'restructuredtext en'
__all__ = ['imsave', 'imread', 'imshow', 'TiffFile', 'TiffSequence']
def imsave(filename, data, photometric=None, planarconfig=None,
resolution=None, description=None, software='tifffile.py',
byteorder=None, bigtiff=False):
"""Write image data to TIFF file.
Image data are written uncompressed in one stripe per plane.
Dimensions larger than 2 or 3 (depending on photometric mode and
planar configuration) are flattened and saved as separate pages.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image height,
width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
software : str
Name of the software used to create the image.
Saved with the first page only.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
bigtiff : bool
If True the BigTIFF format is used.
By default the standard TIFF format is used for data less than 2040 MB.
Examples
--------
>>> data = numpy.random.rand(10, 3, 301, 219)
>>> imsave('temp.tif', data)
"""
assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb'))
assert(planarconfig in (None, 'contig', 'planar'))
assert(byteorder in (None, '<', '>'))
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
if not bigtiff and data.size * data.dtype.itemsize < 2040*2**20:
bigtiff = False
offset_size = 4
tag_size = 12
numtag_format = 'H'
offset_format = 'I'
val_format = '4s'
else:
bigtiff = True
offset_size = 8
tag_size = 20
numtag_format = 'Q'
offset_format = 'Q'
val_format = '8s'
# unify shape of data
samplesperpixel = 1
extrasamples = 0
if photometric is None:
if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)):
photometric = 'rgb'
else:
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if planarconfig is None:
planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig'
if planarconfig == 'contig':
if shape[-1] not in (3, 4):
raise ValueError("not a contiguous RGB(A) image")
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
if shape[-3] not in (3, 4):
raise ValueError("not a planar RGB(A) image")
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
if samplesperpixel == 4:
extrasamples = 1
elif planarconfig and len(shape) > 2:
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
data = data.reshape((-1, 1) + shape[-2:] + (1, ))
shape = data.shape # (pages, planes, height, width, contig samples)
bytestr = bytes if sys.version[0] == '2' else lambda x: bytes(x, 'ascii')
tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
tifftags = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'extra_samples': 338, 'sample_format': 339}
tags = []
tag_data = []
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def tag(name, dtype, number, value, offset=[0]):
# append tag binary string to tags list
# append (offset, value as binary string) to tag_data list
# increment offset by tag_size
if dtype == 's':
value = bytestr(value) + b'\0'
number = len(value)
value = (value, )
t = [pack('HH', tifftags[name], tifftypes[dtype]),
pack(offset_format, number)]
if len(dtype) > 1:
number *= int(dtype[:-1])
dtype = dtype[-1]
if number == 1:
if isinstance(value, (tuple, list)):
value = value[0]
t.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * number <= offset_size:
t.append(pack(val_format, pack(str(number)+dtype, *value)))
else:
t.append(pack(offset_format, 0))
tag_data.append((offset[0] + offset_size + 4,
pack(str(number)+dtype, *value)))
tags.append(b''.join(t))
offset[0] += tag_size
def rational(arg, max_denominator=1000000):
# return numerator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if software:
tag('software', 's', 0, software)
if description:
tag('image_description', 's', 0, description)
elif shape != data_shape:
tag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)))
tag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"))
# write previous tags only once
writeonce = (len(tags), len(tag_data)) if shape[0] > 1 else None
tag('compression', 'H', 1, 1)
tag('orientation', 'H', 1, 1)
tag('image_width', 'I', 1, shape[-2])
tag('image_length', 'I', 1, shape[-3])
tag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
tag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
tag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
tag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig:
tag('planar_configuration', 'H', 1, 1 if planarconfig=='contig' else 2)
tag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
tag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb':
tag('extra_samples', 'H', 1, 1) # alpha channel
else:
tag('extra_samples', 'H', extrasamples, (0, ) * extrasamples)
if resolution:
tag('x_resolution', '2I', 1, rational(resolution[0]))
tag('y_resolution', '2I', 1, rational(resolution[1]))
tag('resolution_unit', 'H', 1, 2)
tag('rows_per_strip', 'I', 1, shape[-3])
# use one strip per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1]
tag('strip_byte_counts', offset_format, shape[1], strip_byte_counts)
# strip_offsets must be the last tag; will be updated later
tag('strip_offsets', offset_format, shape[1], (0, ) * shape[1])
fh = open(filename, 'wb')
seek = fh.seek
tell = fh.tell
def write(arg, *args):
fh.write(pack(arg, *args) if args else arg)
write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
write('HHH', 43, 8, 0)
else:
write('H', 42)
ifd_offset = tell()
write(offset_format, 0) # first IFD
for i in range(shape[0]):
# update pointer at ifd_offset
pos = tell()
seek(ifd_offset)
write(offset_format, pos)
seek(pos)
# write tags
write(numtag_format, len(tags))
tag_offset = tell()
write(b''.join(tags))
ifd_offset = tell()
write(offset_format, 0) # offset to next IFD
# write extra tag data and update pointers
for off, dat in tag_data:
pos = tell()
seek(tag_offset + off)
write(offset_format, pos)
seek(pos)
write(dat)
# update strip_offsets
pos = tell()
if len(strip_byte_counts) == 1:
seek(ifd_offset - offset_size)
write(offset_format, pos)
else:
seek(pos - offset_size*shape[1])
strip_offset = pos
for size in strip_byte_counts:
write(offset_format, strip_offset)
strip_offset += size
seek(pos)
# write data
data[i].tofile(fh) # if this fails try to update Python and numpy
fh.flush()
# remove tags that should be written only once
if writeonce:
tags = tags[writeonce[0]:]
d = writeonce[0] * tag_size
tag_data = [(o-d, v) for (o, v) in tag_data[writeonce[1]:]]
writeonce = None
fh.close()
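# Hedged usage sketch, not part of the original module: one way the imsave()
# parameters documented above might be combined. The file name, random data
# and tag values below are illustrative assumptions only.
def _example_imsave_usage():
    """Write a small float32 stack as a multi-page planar RGB TIFF."""
    data = numpy.random.rand(4, 3, 64, 48).astype('float32')
    imsave('example_planar_rgb.tif', data, photometric='rgb',
           planarconfig='planar', resolution=(72.0, 72.0),
           software='tifffile example', description='hedged example stack')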
def imread(files, *args, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
Examples
--------
>>> im = imread('test.tif', 0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(*args, **kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(*args, **kwargs)
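# Hedged usage sketch, not part of the original module: the key/series and
# multi-file forms of imread() described in the docstring above. File names
# and glob patterns are placeholders.
def _example_imread_usage():
    """Read selected pages from one file and assemble a file sequence."""
    first_page = imread('stack.tif', 0)                    # first page of the file
    sub_stack = imread('stack.tif', key=slice(0, 8))       # pages 0..7 of the file
    sequence = imread('stack_t*_z*.tif', pattern='axes')   # TiffSequence path
    return first_page, sub_stack, sequence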
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
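# Hedged illustration, not part of the original module: lazyattr is a
# non-data descriptor, so the value it stores on the instance shadows the
# descriptor and later accesses skip the function entirely.
class _LazyAttrExample(object):
    @lazyattr
    def expensive(self):
        # computed once on first access, then cached as an instance attribute
        return sum(range(1000000))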
class TiffFile(object):
"""Read image and meta-data from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
All attributes are read-only.
Examples
--------
>>> tif = TiffFile('test.tif')
... try:
... images = tif.asarray()
... except Exception as e:
... print(e)
... finally:
... tif.close()
"""
def __init__(self, arg, name=None, multifile=False):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Human readable label of open file.
multifile : bool
If True, series may include pages from multiple files.
"""
if isinstance(arg, basestring):
filename = os.path.abspath(arg)
self._fh = open(filename, 'rb')
else:
filename = str(name)
self._fh = arg
self._fh.seek(0, 2)
self._fsize = self._fh.tell()
self._fh.seek(0)
self.fname = os.path.basename(filename)
self.fpath = os.path.dirname(filename)
self._tiffs = {self.fname: self} # cache of TiffFiles
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
try:
self._fromfile()
except Exception:
self._fh.close()
raise
def close(self):
"""Close open file handle(s)."""
for tif in self._tiffs.values():
if tif._fh:
tif._fh.close()
tif._fh = None
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
#print("Page %d \r" % len(self.pages), end='')
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
series = []
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(self.pages[0].mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))]
elif self.is_lsm:
lsmi = self.pages[0].cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if self.pages[0].is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes]
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + list(pages[0].shape)
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = self.pages[0].imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (numpy.prod(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(self.pages[0].shape)
axes.extend(self.pages[0].axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(self.pages[0].dtype))]
elif self.is_nih:
series = [Record(pages=self.pages,
shape=(len(self.pages),) + self.pages[0].shape,
axes='I' + self.pages[0].axes,
dtype=numpy.dtype(self.pages[0].dtype))]
elif self.pages[0].is_shaped:
shape = self.pages[0].tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(self.pages[0].dtype))]
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if not shape in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
return series
def asarray(self, key=None, series=None):
"""Return image data of multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if len(pages) == 1:
return pages[0].asarray()
elif self.is_nih:
result = numpy.vstack(p.asarray(colormapped=False,
squeeze=False) for p in pages)
if pages[0].is_palette:
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
if self.is_ome and any(p is None for p in pages):
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray())
result = numpy.vstack((p.asarray() if p else nopage)
for p in pages)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = ElementTree.XML(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._tiffs = {uuid: self}
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("not an OME-TIFF master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
axes = "".join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = numpy.prod(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
idx = numpy.ravel_multi_index(idx, shape[:-2])
for uuid in data:
if uuid.tag.endswith('UUID'):
if uuid.text not in self._tiffs:
if not self._multifile:
# abort reading multi file OME series
return []
fn = uuid.attrib['FileName']
try:
tf = TiffFile(os.path.join(self.fpath, fn))
except (IOError, ValueError):
warnings.warn("failed to read %s" % fn)
break
self._tiffs[uuid.text] = tf
pages = self._tiffs[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(ifds[0].dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self.fname.capitalize(),
format_size(self._fsize),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._tiffs) > 1:
result.append("%i files" % (len(self._tiffs)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
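# Hedged usage sketch, not part of the original module: iterate pages and
# read one series with the TiffFile API defined above. The file name is a
# placeholder.
def _example_tifffile_series():
    """Print page summaries and return the first series as an array."""
    with TiffFile('multi_series.tif') as tif:
        for page in tif:
            print(page)               # one-line TiffPage summary
        return tif.asarray(series=0)  # reshaped to tif.series[0].shape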
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'P' plane, 'I' image series,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'F' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look-up table, if present.
mm_uic_tags : Record(dict)
Consolidated MetaMorph mm_uic# tags, if present.
cz_lsm_scan_info : Record(dict)
LSM scan info attributes, if present.
imagej_tags : Record(dict)
Consolidated ImageJ description and meta_data tags, if present.
All attributes are read-only.
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent._fh
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
tags[tag.name] = tag
except TiffTag.Error as e:
warnings.warn(str(e))
# read LSM info subrecords
if self.is_lsm:
pos = fh.tell()
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if not offset:
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh, byteorder))
except ValueError:
pass
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if not 'photometric' in tags:
self.photometric = None
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if self.is_imagej:
# consolidate imagej meta data
adict = imagej_description(tags['image_description'].value)
try:
adict.update(imagej_meta_data(
tags['imagej_meta_data'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception:
pass
self.imagej_tags = Record(adict)
if not 'image_length' in self.tags or not 'image_width' in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
if self.is_stk:
# consolidate mm_uic tags
planes = tags['mm_uic2'].count
self.mm_uic_tags = Record(tags['mm_uic2'].value)
for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'):
if key in tags:
self.mm_uic_tags.update(tags[key].value)
if self.planar_configuration == 'contig':
self._shape = (planes, 1, self.image_length, self.image_width,
self.samples_per_pixel)
self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4))
self.axes = 'PYXS'
else:
self._shape = (planes, self.samples_per_pixel,
self.image_length, self.image_width, 1)
self.shape = self._shape[:4]
self.axes = 'PSYX'
if self.is_palette and (self.color_map.shape[1]
>= 2**self.bits_per_sample):
self.shape = (3, planes, self.image_length, self.image_width)
self.axes = 'CPYX'
else:
#warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.planar_configuration == 'contig':
self._shape = (
1, 1, self.image_length, self.image_width, samples)
else:
self._shape = (
1, samples, self.image_length, self.image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
self.shape = (3, self.image_length, self.image_width)
self.axes = 'CYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
self.shape = (self.image_length, self.image_width)
self.axes = 'YX'
elif self.is_rgb or self.samples_per_pixel > 1:
if self.planar_configuration == 'contig':
self._shape = (1, 1, self.image_length, self.image_width,
self.samples_per_pixel)
self.shape = (self.image_length, self.image_width,
self.samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (1, self.samples_per_pixel, self.image_length,
self.image_width, 1)
self.shape = self._shape[1:-1]
self.axes = 'SYX'
if self.is_rgb and 'extra_samples' in self.tags:
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.planar_configuration == 'contig':
self.shape = self.shape[:2] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, self.image_length, self.image_width, 1)
self.shape = self._shape[2:4]
self.axes = 'YX'
if not self.compression and not 'strip_byte_counts' in tags:
self.strip_byte_counts = numpy.prod(self.shape) * (
self.bits_per_sample // 8)
def asarray(self, squeeze=True, colormapped=True, rgbonly=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any argument is False, the shape of the returned array might be
different from the page shape.
Parameters
----------
squeeze : bool
If True all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True color mapping is applied for palette-indexed images.
rgbonly : bool
If True return RGB(A) image without additional extra samples.
"""
fh = self.parent._fh
if not fh:
raise IOError("TIFF file is not open")
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
if ('ycbcr_subsampling' in self.tags
and self.tags['ycbcr_subsampling'].value not in (1, (1, 1))):
raise ValueError("YCbCr subsampling not supported")
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
dtype = self._dtype
shape = self._shape
if not shape:
return None
image_width = self.image_width
image_length = self.image_length
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
shape = shape[:-3] + (tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
try:
offsets[0]
except TypeError:
offsets = (offsets, )
byte_counts = (byte_counts, )
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if (not self.is_tiled and (self.is_stk or (not self.compression
and bits_per_sample in (8, 16, 32, 64)
and all(offsets[i] == offsets[i+1] - byte_counts[i]
for i in range(len(offsets)-1))))):
# contiguous data
fh.seek(offsets[0])
result = numpy_fromfile(fh, typecode, numpy.prod(shape))
result = result.astype('=' + dtype)
else:
if self.planar_configuration == 'contig':
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
unpack = lambda x: numpy.fromstring(x, typecode)
elif isinstance(bits_per_sample, tuple):
unpack = lambda x: unpackrgb(x, typecode, bits_per_sample)
else:
unpack = lambda x: unpackints(x, typecode, bits_per_sample,
runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, pl = 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, tl:tl+tile_length,
tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[-2]:
tw, tl = 0, tl + tile_length
if tl >= shape[-3]:
tl, pl = 0, pl + 1
result = result[..., :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = unpack(decompress(fh.read(bytecount)))
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not self.is_tiled:
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.planar_configuration == 'contig':
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.planar_configuration == 'contig':
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
return result
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
','.join(t[3:] for t in ('is_stk', 'is_lsm', 'is_nih', 'is_ome',
'is_imagej', 'is_fluoview', 'is_mdgel',
'is_mediacy', 'is_reduced', 'is_tiled')
if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains MM_UIC2 tag."""
return 'mm_uic2' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return ('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ='))
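# Hedged usage sketch, not part of the original module: how the TiffPage
# attributes and tags documented above are typically inspected. The file
# name is a placeholder.
def _example_tiffpage_inspection():
    """Return shape, axes, dtype and tag names of the first page."""
    with TiffFile('example.tif') as tif:
        page = tif[0]
        return page.shape, page.axes, page.dtype, sorted(page.tags)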
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data. For codes in CUSTOM_TAGS the 4 bytes file content.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent._fh
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[dtype]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % dtype)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent._fsize:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
fh.seek(0, 2) # bug in numpy/Python 3.x ?
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(0, 2) # bug in numpy/Python 3.x ?
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if not code in CUSTOM_TAGS:
if len(value) == 1:
value = value[0]
if dtype.endswith('s'):
value = stripnull(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
Properties
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> ims = TiffSequence("test.oif.files/*.tif")
>>> ims = ims.asarray()
>>> ims.shape
(2, 100, 256, 256)
"""
_axes_pattern = """
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""
class _ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes'):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._axes_pattern if pattern == 'axes' else pattern
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self._ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = ((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
Raise IndexError if image shapes don't match.
"""
im = self.imread(self.files[0])
result_shape = self.shape + im.shape
result = numpy.zeros(result_shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = result_shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self._ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self._ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self._ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self._ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if numpy.prod(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
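# Hedged usage sketch, not part of the original module: assemble an axes
# labelled array from a series of single-plane files. The glob pattern is a
# placeholder matching the Olympus/Leica style names parsed by _axes_pattern.
def _example_tiffsequence_usage():
    """Read a T/Z file series into a single numpy array."""
    seq = TiffSequence('image_t*_z*.tif')
    print(seq)            # files, axes and shape summary
    return seq.asarray()  # shape == seq.shape + shape of a single image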
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
if k.startswith('_'): # does not work with byte
continue
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
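# Hedged sanity sketch, not part of the original module: Record is a dict
# with attribute access, as used for the consolidated tag structures above.
def _example_record_usage():
    """Attribute and item access address the same mapping."""
    r = Record(width=64, height=48)
    assert r.width == r['width'] == 64
    r.depth = 8           # equivalent to r['depth'] = 8
    return r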
class TiffTags(Record):
"""Dictionary of TiffTags with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
#sortbycode = lambda a, b: cmp(a.code, b.code)
#for tag in sorted(self.values(), sortbycode):
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode,
str(tag.value).split('\n', 1)[0])
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
return numpy_fromfile(fh, byteorder+dtype[-1], count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
return numpy_fromfile(fh, byteorder+dtype[-1], count)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return numpy.rec.fromfile(fh, MM_HEADER, 1, byteorder=byteorder)[0]
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return numpy_fromfile(fh, byteorder+'8f8', 1)[0]
def read_mm_uic1(fh, byteorder, dtype, count):
"""Read MM_UIC1 tag from file and return as dictionary."""
t = fh.read(8*count)
t = struct.unpack('%s%iI' % (byteorder, 2*count), t)
return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2])
if k in MM_TAG_IDS)
def read_mm_uic2(fh, byteorder, dtype, count):
"""Read MM_UIC2 tag from file and return as dictionary."""
result = {'number_planes': count}
values = numpy_fromfile(fh, byteorder+'I', 6*count)
result['z_distance'] = values[0::6] // values[1::6]
#result['date_created'] = tuple(values[2::6])
#result['time_created'] = tuple(values[3::6])
#result['date_modified'] = tuple(values[4::6])
#result['time_modified'] = tuple(values[5::6])
return result
def read_mm_uic3(fh, byteorder, dtype, count):
"""Read MM_UIC3 tag from file and return as dictionary."""
t = numpy_fromfile(fh, byteorder+'I', 2*count)
return {'wavelengths': t[0::2] // t[1::2]}
def read_mm_uic4(fh, byteorder, dtype, count):
"""Read MM_UIC4 tag from file and return as dictionary."""
t = struct.unpack(byteorder + 'hI'*count, fh.read(6*count))
return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2])
if k in MM_TAG_IDS)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
result = numpy.rec.fromfile(fh, CZ_LSM_INFO, 1,
byteorder=byteorder)[0]
{50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation
return result
def read_cz_lsm_time_stamps(fh, byteorder):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack(byteorder+'II', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
return struct.unpack(('%s%dd' % (byteorder, count)),
fh.read(8*count))
def read_cz_lsm_event_list(fh, byteorder):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack(byteorder+'II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack(byteorder+'IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh, byteorder):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack(byteorder+"I", fh.read(4))[0]:
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack(byteorder+"III", fh.read(12))
if dtype == 2:
value = stripnull(fh.read(size))
elif dtype == 4:
value = unpack(byteorder+"i", fh.read(4))[0]
elif dtype == 5:
value = unpack(byteorder+"d", fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
block = blocks.pop()
else:
setattr(block, "unknown_%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = numpy.rec.fromfile(fh, NIH_IMAGE_HEADER, 1, byteorder=byteorder)[0]
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def imagej_meta_data(data, bytecounts, byteorder):
"""Return dict from ImageJ meta data tag value."""
if sys.version_info[0] > 2:
_str = lambda x: str(x, 'cp1252')
else:
_str = str
def read_string(data, byteorder):
return _str(data[1::2])
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = {
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
if not bytecounts:
raise ValueError("no ImageJ meta data")
if not data.startswith(b'IJIJ'):
raise ValueError("invalid ImageJ meta data")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ meta data header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
if sys.version_info[0] > 2:
_str = lambda x: str(x, 'cp1252')
else:
_str = str
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
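# Hedged sanity check, not part of the original module: parse a minimal
# ImageJ description block with imagej_description() defined above.
def _example_imagej_description():
    desc = b'ImageJ=1.43\nimages=2\nchannels=2\nhyperstack=true\n'
    tags = imagej_description(desc)
    assert tags['images'] == 2 and tags['hyperstack'] is True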
def _replace_by(module_function, package=None, warn=True):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('Could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if not package:
module = import_module(module)
else:
module = import_module('.' + module, package=package)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
pass #warnings.warn("failed to import %s" % module_function)
return func
return decorate
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
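# Hedged sanity check, not part of the original module: a three-byte literal
# run followed by a three-byte replicate run in PackBits encoding.
def _example_decodepackbits():
    assert decodepackbits(b'\x02abc\xfeZ') == b'abcZZZ'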
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn(
"decodelzw encountered unexpected end of stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
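# Hedged sanity check, not part of the original module: unpack two 4-bit
# unsigned samples from a single byte.
def _example_unpackints():
    assert list(unpackints(b'\xab', 'B', 4, runlen=2)) == [10, 11]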
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
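# Hedged usage sketch, not part of the original module: flip a non-squeezed
# (..., length, width, samples) array as the TIFF orientation tag requires.
def _example_reorient():
    image = numpy.arange(24).reshape(1, 1, 2, 3, 4)
    flipped = reorient(image, 'bottom_left')  # reverses the length axis (-3)
    return flipped.shape                      # (1, 1, 2, 3, 4)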
def numpy_fromfile(arg, dtype=float, count=-1, sep=''):
"""Return array from data in binary file.
Work around numpy issue #2230, "numpy.fromfile does not accept StringIO
object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(arg, dtype, count, sep)
except IOError:
if count < 0:
size = 2**30
else:
size = count * numpy.dtype(dtype).itemsize
data = arg.read(int(size))
return numpy.fromstring(data, dtype, count, sep)
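# Hedged sanity sketch, not part of the original module: numpy_fromfile falls
# back to read() + numpy.fromstring for file-like objects without fileno().
def _example_numpy_fromfile():
    import io
    buf = io.BytesIO(numpy.arange(4, dtype='<u2').tostring())
    return numpy_fromfile(buf, '<u2', 4)  # array([0, 1, 2, 3], dtype=uint16)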
def stripnull(string):
"""Return string truncated at first null character."""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
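# Hedged sanity check, not part of the original module: format_size() switches
# units in powers of 1024 and rounds to whole numbers.
def _example_format_size():
    assert format_size(512) == '512 B'
    assert format_size(3 * 1024 * 1024) == '3 MB'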
def natural_sorted(iterable):
"""Return human sorted list of strings.
Examples
--------
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
numbers = re.compile(r'(\d+)')
sortkey = lambda x: [(int(c) if c.isdigit() else c)
for c in re.split(numbers, x)]
return sorted(iterable, key=sortkey)
def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Examples
--------
>>> datetime_from_timestamp(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(n)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory. Print error message on failure.
Examples
--------
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
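# Hedged sanity check, not part of the original module: decode the bit flags
# of a new_subfile_type value into names.
def _example_subfile_types():
    assert TIFF_SUBFILE_TYPES()[5] == ('reduced_image', 'mask')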
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated',
6: 'cielab',
7: 'icclab',
8: 'itulab',
32844: 'logl',
32845: 'logluv',
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1B', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
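# Illustrative note (assumes little-endian byte order for the example): each
# value is a struct-style count/format pair, e.g. type 5 (RATIONAL) maps to
# '2I', i.e. two unsigned 32-bit integers read as numerator then denominator:
#     import struct
#     numerator, denominator = struct.unpack('<2I', b'\x01\x00\x00\x00\x02\x00\x00\x00')
#     # -> (1, 2)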
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
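# Illustrative lookups (no new behavior): the (sample_format, bits_per_sample)
# key yields a numpy dtype character, e.g. TIFF_SAMPLE_DTYPES[('uint', 16)]
# is 'H' and TIFF_SAMPLE_DTYPES[('float', 32)] is 'f'; odd bit depths such as
# ('uint', 12) are widened to the next standard integer size ('H').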
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'P': 'plane', # page
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'F': 'phase',
'R': 'tile', # region, point
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
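# After the update above the mapping is bidirectional, e.g. AXES_LABELS['X']
# == 'width' and AXES_LABELS['width'] == 'X'.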
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'),
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'),
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'),
('lutmode_t', 'u1'),
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'),
]
#NIH_COLORTABLE_TYPE = (
# 'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
# 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
#NIH_LUTMODE_TYPE = (
# 'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
# 'ColorLut', 'CustomGrayscale')
#NIH_CURVEFIT_TYPE = (
# 'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
# 'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
# 'UncalibratedOD')
#NIH_UNITS_TYPE = (
# 'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
# 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
#NIH_STACKTYPE_TYPE = (
# 'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# MetaMorph STK tags
MM_TAG_IDS = {
0: 'auto_scale',
1: 'min_scale',
2: 'max_scale',
3: 'spatial_calibration',
#4: 'x_calibration',
#5: 'y_calibration',
#6: 'calibration_units',
#7: 'name',
8: 'thresh_state',
9: 'thresh_state_red',
11: 'thresh_state_green',
12: 'thresh_state_blue',
13: 'thresh_state_lo',
14: 'thresh_state_hi',
15: 'zoom',
#16: 'create_time',
#17: 'last_saved_time',
18: 'current_buffer',
19: 'gray_fit',
20: 'gray_point_count',
#21: 'gray_x',
#22: 'gray_y',
#23: 'gray_min',
#24: 'gray_max',
#25: 'gray_unit_name',
26: 'standard_lut',
27: 'wavelength',
#28: 'stage_position',
#29: 'camera_chip_offset',
#30: 'overlay_mask',
#31: 'overlay_compress',
#32: 'overlay',
#33: 'special_overlay_mask',
#34: 'special_overlay_compress',
#35: 'special_overlay',
36: 'image_property',
#37: 'stage_label',
#38: 'autoscale_lo_info',
#39: 'autoscale_hi_info',
#40: 'absolute_z',
#41: 'absolute_z_valid',
#42: 'gamma',
#43: 'gamma_red',
#44: 'gamma_green',
#45: 'gamma_blue',
#46: 'camera_bin',
47: 'new_lut',
#48: 'image_property_ex',
49: 'plane_property',
#50: 'user_lut_table',
51: 'red_autoscale_info',
#52: 'red_autoscale_lo_info',
#53: 'red_autoscale_hi_info',
54: 'red_minscale_info',
55: 'red_maxscale_info',
56: 'green_autoscale_info',
#57: 'green_autoscale_lo_info',
#58: 'green_autoscale_hi_info',
59: 'green_minscale_info',
60: 'green_maxscale_info',
61: 'blue_autoscale_info',
#62: 'blue_autoscale_lo_info',
#63: 'blue_autoscale_hi_info',
64: 'blue_min_scale_info',
65: 'blue_max_scale_info',
#66: 'overlay_plane_color'
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'i4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('dimension_data_type', 'i4'),
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('data_type', 'u4'),
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_information', 'u4'),
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_information': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
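# Editorial sketch tying the two tables together (hypothetical values): a
# cz_lsm_info record with scan_type 3 is interpreted with axes order 'XYTCZ',
# and the size of each axis is then read from the attribute named in
# CZ_DIMENSIONS, e.g. 'T' -> 'dimension_time'.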
# Descriptions of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
2: '12 bit unsigned integer',
5: '32 bit float',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detectionchannels",
0x80000000: "illuminationchannels",
0xa0000000: "beamsplitters",
0xc0000000: "datachannels",
0x13000000: "markers",
0x11000000: "timers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
0x40000000: "tracks",
0x50000000: "lasers",
0x70000000: "detectionchannels",
0x90000000: "illuminationchannels",
0xb0000000: "beamsplitters",
0xd0000000: "datachannels",
0x14000000: "markers",
0x12000000: "timers",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "oledb_recording_scan_type",
0x10000008: "oledb_recording_scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bccorrection",
0x10000049: "position_bccorrection1",
0x10000050: "position_bccorrection2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
# lasers
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# tracks
0x40000001: "multiplex_type",
0x40000002: "multiplex_order",
0x40000003: "sampling_mode",
0x40000004: "sampling_method",
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# detection_channels
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "detection_channel_name",
0x70000015: "detection_detector_gain_bc1",
0x70000016: "detection_detector_gain_bc2",
0x70000017: "detection_amplifier_gain_bc1",
0x70000018: "detection_amplifier_gain_bc2",
0x70000019: "detection_amplifier_offset_bc1",
0x70000020: "detection_amplifier_offset_bc2",
0x70000021: "detection_spectral_scan_channels",
0x70000022: "detection_spi_wavelength_start",
0x70000023: "detection_spi_wavelength_stop",
0x70000026: "detection_dye_name",
0x70000027: "detection_dye_folder",
# illumination_channels
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitters
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channels
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# markers
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
# timers
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
347: ('jpeg_tables', None, None, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
37510: ('user_comment', None, None, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50838: ('imagej_byte_counts', None, None, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
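# Illustrative reading of one entry (no new behavior): TIFF_TAGS[259] states
# that tag code 259 is exposed as attribute 'compression', defaults to 1, is
# stored as TIFF type 3 (SHORT) with count 1, and its values are translated
# through TIFF_COMPESSIONS, so a raw value of 5 means 'lzw'.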
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_numpy),
33628: ('mm_uic1', read_mm_uic1),
33629: ('mm_uic2', read_mm_uic2),
33630: ('mm_uic3', read_mm_uic3),
33631: ('mm_uic4', read_mm_uic4),
34361: ('mm_header', read_mm_header),
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info),
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_meta_data', read_bytes),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
    Requires pyplot to be imported beforehand, e.g. ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
    figure : matplotlib.figure.Figure, optional
        Matplotlib figure in which to plot.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
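    Examples
    --------
    Minimal usage sketch (illustrative only, not a doctest; ``image`` is a
    hypothetical numpy array and matplotlib must be importable)::
        from matplotlib import pyplot
        fig, subplt, img = imshow(image, photometric='minisblack')
        pyplot.show()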
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and data.shape[-1] in (3, 4):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])  # smallest value above the type minimum
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])  # smallest value above the type minimum
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ub' and vmin == 0:
cmap = 'gray'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
    if sys.version_info < (2, 6):  # robust check; parsing sys.version as a float breaks on e.g. '3.10'
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
search_doc = lambda r, d: re.search(r, __doc__).group(1) if __doc__ else d
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description=search_doc("\n\n([^|]*?)\n\n", ''),
version="%%prog %s" % search_doc(":Version: (.*)", "Unknown"))
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the internal tests")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
notnone = lambda x: next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_information', 'mm_uic_tags',
'mm_header', 'imagej_tags', 'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
if page.is_stk:
try:
vmin = page.mm_uic_tags['min_scale']
vmax = page.mm_uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str
unicode = str
if __name__ == "__main__":
sys.exit(main()) | mit |
RayMick/scikit-learn | examples/classification/plot_lda.py | 70 | 2413 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
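# Editorial sketch (hypothetical call, not part of the benchmark below):
# generate_data(6, 3) returns X with shape (6, 3) and six binary labels y;
# only the first column of X carries class information, the rest is noise.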
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
MechCoder/scikit-learn | examples/decomposition/plot_image_denoising.py | 6 | 5958 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first fitting a dictionary with online
:ref:`DictionaryLearning` and then comparing various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect, this difference looks like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
try:  # SciPy >= 0.16 has face in misc
from scipy.misc import face
face = face(gray=True)
except ImportError:
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255.
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
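# Each entry above is (plot title, transform_algorithm passed to
# dico.set_params, extra keyword arguments); e.g. the first entry rebuilds
# every noisy patch from its single best-matching dictionary atom.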
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
flightgong/scikit-learn | sklearn/decomposition/truncated_svd.py | 2 | 8230 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import warnings
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import (array2d, as_float_array, atleast2d_or_csr,
check_random_state)
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis0
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
`components_` : array, shape (n_components, n_features)
`explained_variance_ratio_` : array, [n_components]
Percentage of variance explained by each of the selected components.
`explained_variance_` : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized",
n_iter=5, random_state=None, tol=0., n_iterations=None):
if n_iterations is not None:
warnings.warn("n_iterations was renamed to n_iter for consistency "
"and will be removed in 0.16.", DeprecationWarning)
n_iter = n_iterations
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
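        # Editorial note (interpretation, not new behavior): explained_variance_
        # is the per-component variance of U * Sigma, and the ratio divides it
        # by the total feature-wise variance of X; unlike PCA, X is not centered
        # here, so the figures are only analogous to PCA's
        # explained_variance_ratio_.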
n_samples = X.shape[0]
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis0(X)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = atleast2d_or_csr(X)
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = array2d(X)
return np.dot(X, self.components_)
@property
def n_iterations(self):
warnings.warn("n_iterations was renamed to n_iter for consistency "
"and will be removed in 0.16.", DeprecationWarning)
return self.n_iter
| bsd-3-clause |
jkarnows/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/hmm.py | 18 | 48579 | # Hidden Markov Models
#
# Author: Ron Weiss <[email protected]>
# and Shiqiao Du <[email protected]>
# API changes: Jaques Grobler <[email protected]>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. This module will be removed in version 0.17.
It has been moved to a separate repository:
https://github.com/hmmlearn/hmmlearn
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
@deprecated("WARNING: The HMM module and its functions will be removed in 0.17 "
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
WARNING: Modifies inplace the array
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
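# Hedged sketch (hypothetical values; not a doctest): with axis=1 every row is
# rescaled to sum to 1, e.g. normalize(np.array([[1., 3.], [2., 2.]]), axis=1)
# returns approximately [[0.25, 0.75], [0.5, 0.5]]; an EPS is added first, so
# results are not exact, and the input array is modified in place.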
@deprecated("WARNING: The HMM module and its function will be removed in 0.17"
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
        subclass-specific emission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
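    # Usage sketch (hypothetical, assumes a fitted subclass instance ``model``):
    #     obs, states = model.sample(n=100)
    # ``obs`` stacks one emission per step and ``states`` holds the index of
    # the hidden state that generated each emission.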
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
n_components : int
Number of states.
    covariance_type : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
self._covars_[self._covars_ == 0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in range(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs)
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
    emissionprob : array, shape (`n_components`, `n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains negative integers
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input is discontinuous
return False
return True
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
init_params : string, optional
Controls which parameters are initialized prior to training.
Can contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights. Defaults to
all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat,
        'm' for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix that is incompatible with the scikit's
# BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.score_samples(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
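# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): fitting the GaussianHMM
# defined above, sampling from it, and decoding a state sequence. The data and
# parameter choices below are synthetic and purely illustrative.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.RandomState(42)
    demo_obs = rng.randn(100, 2)                        # (n_samples, n_features)
    demo_model = GaussianHMM(n_components=3, covariance_type='diag', n_iter=10)
    demo_model.fit([demo_obs])                          # fit takes a list of sequences
    demo_samples, demo_drawn_states = demo_model.sample(25, random_state=0)
    demo_logprob, demo_path = demo_model.decode(demo_obs)  # Viterbi by default
    print(demo_logprob, demo_path[:10])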
| apache-2.0 |
pravsripad/jumeg | jumeg/epocher/jumeg_epocher_plot.py | 2 | 8315 | # -*- coding: utf-8 -*-
"""
Created on 08.06.2018
@author: fboers
"""
import os,os.path,logging
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.backends.backend_pdf import PdfPages
import mne
from jumeg.base.jumeg_base import JuMEG_Base_IO
logger = logging.getLogger('jumeg')
__version__="2019.05.14.001"
#--- A4 landscape
pl.rc('figure', figsize=(11.69,8.27))
pl.rcParams.update({'font.size': 8})
class JuMEG_Epocher_Plot(JuMEG_Base_IO):
def __init__ (self,raw=None):
super(JuMEG_Epocher_Plot, self).__init__()
self.raw = raw
self.dpi = 100
self.file_extention = '.png'
self.colors = ['r','b','g','c','m','y','k']
def minmax(self,d):
ymin, ymax = d.min(),d.max()
ymin -= np.abs(ymin) * 0.1 #factor * 1.1
ymax += np.abs(ymax) * 0.1 #factor * 1.1
return ymin,ymax
def _set_colors(self):
for i,j in enumerate(pl.gca().lines):
j.set_color(self.colors[i % len(self.colors)])
def plot_group(self,ep,group="meg",picks=None,info=None,show_evt=False,show_labels=True):
"""
:param ep:
:param group:
:param picks:
:param info:
:param show_evt:
:param show_labels:
:return:
"""
if picks.any():
labels = [ ep.info['ch_names'][x] for x in picks ]
avg = ep.average(picks=picks)
if info:
avg.data *= info.get('scale',1.0)
pl.ylabel('[' + info.get('unit','au') + ']')
d = pl.plot(avg.times, avg.data.T)
self._set_colors()
if show_evt:
#--- change legend
idx0 = np.where(avg.times == 0)
labels = [ep.info['ch_names'][x] for x in picks]
if idx0:
for idx in range(len(labels)):
labels[idx] += " evt: {} ".format(int(avg.data[idx,idx0].flatten()))
if show_labels:
pl.legend(d, labels, loc=2,prop={'size':8})
pl.ylim(self.minmax(avg.data))
pl.xlim(ep.tmin,ep.tmax)
pl.xlabel('[s]')
pl.grid(True)
return avg.data
def plot_stim(self,ep,group="stim",picks=None,info=None,show_evt=False,show_labels=True):
"""
:param ep:
:param group:
:param picks:
:param info:
:param show_evt:
:param show_labels:
:return:
"""
if picks.any():
labels = [ ep.info['ch_names'][x] for x in picks ]
avg = ep.average(picks=picks)
if info:
avg.data *= info.get('scale',1.0)
pl.ylabel('[' + info.get('unit','au') + ']')
d = pl.plot(avg.times, avg.data.T)
self._set_colors()
if show_evt:
#--- change legend
idx0 = np.where(avg.times == 0)
# labels = [ep.info['ch_names'][x] for x in picks]
if idx0:
for idx in range(len(labels)):
labels[idx] += " evt: {} ".format(int(avg.data[idx,idx0].flatten()))
if show_labels:
pl.legend(d, labels, loc=2,prop={'size':8})
pl.ylim(self.minmax(avg.data))
pl.xlim(ep.tmin,ep.tmax)
pl.xlabel('[s]')
pl.grid(True)
def plot_evoked(self,evt,fname=None,save_plot=True,show_plot=False,condition=None,plot_dir=None,
info={'meg':{'scale':1e15,'unit':'fT'},'eeg':{'scale':1e3,'unit':'mV'},'emg':{'scale':1e3,'unit':'mV'},}):
'''
:param evt:
event dictionary
evt['events'] : <np.array([])> from mne.find_events
evt['event_id']: <None> list of event ids
evt['baseline_corrected']: True/False
baseline:
evt['bc']['events'] = np.array([])
evt['bc']['event_id'] = None
:param fname:
:param save_plot:
:param show_plot:
:param condition:
:param plot_dir:
:param info:
plot subplots evoked/average
MEG
ECG/EOG + performance
STIM Trigger/Response
events, rt mean median min max
:return:
'''
if not evt: return
ep = evt["epochs"]
name = 'test'
subject_id = name
if fname:
fout_path = os.path.dirname(fname)
name = os.path.basename(fname)
subject_id = name.split('_')[0]
else:
name = "test.png"
fout_path = "."
if plot_dir:
fout_path += "/" + plot_dir
try:
os.makedirs(fout_path,exist_ok=True)
except:
logger.exception("---> can not create epocher plot\n"+
" -> directory: {}\n".format(fout_path)+
" -> filename : {}".format(fname) )
return
# mkpath( fout_path )
fout = fout_path +'/'+ name
#pl.ioff() # switch off (interactive) plot visualisation
pl.figure(name)
pl.clf()
#fig = pl.figure(name,figsize=(10, 8), dpi=100))
pl.title(name)
#--- make title
t = subject_id + ' Evoked '
if condition:
t += ' ' + condition
t += ' Id: {} counts: {}'.format(ep.events[0,2],ep.events.shape[0])
if ep.info['bads']:
t = t + " bads: " + ','.join(ep.info['bads'])
#---ck if emg channels exist
picks = self.picks.emg_nobads(ep)
if picks.any():
nplt = 4
else:
nplt = 3
#--- meg
pl.subplot(nplt,1,1)
pl.title(t)
self.plot_group(ep,group="meg",picks=self.picks.meg_nobads(ep),info=info.get('meg'),show_labels=False)
#--- ecg eog
pl.subplot(nplt,1,2)
self.plot_group(ep,group="ecg eog",picks=self.picks.ecg_eog(ep),info=info.get('eeg'))
#--- stim
pl.subplot(nplt,1,3)
self.plot_group(ep,group="stim",picks=self.picks.stim_response(ep),info=info.get('stim'),show_evt=True)
'''
ax = pl.gca()
ax.set_ylabel('Stim', color=self.colors[0])
ax.tick_params(axis='y', labelcolor=self.colors[0])
ax.set_ylim(0,data[0].max() +10)
ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('RES', color=self.colors[1])
ax2.tick_params(axis='y', labelcolor=self.colors[1])
ax2.set_ylim( 0,data[1].max()+10 )
#fig.tight_layout() # otherwise the right y-label is slightly clipped
'''
#--- emg
if nplt > 3:
pl.subplot(nplt,1,4)
self.plot_group(ep,group="emg",picks=self.picks.emg_nobads(ep),info=info.get('emg'))
#--- plt event_id table
cols = ('EvtId', 'Counts')
#--- get ids and counts
ids,cnts = np.unique( evt["events"][:,-1],return_counts=True)
data = np.zeros((len(ids),2),dtype=np.int)
data[:,0] += ids
data[:,1] += cnts
yend= len(ids)*0.12 #[left, bottom, width, height]
if yend > 4.0:
yend= 4.0
tab = pl.table(cellText=data,colLabels=cols,loc='top',
colWidths=[0.04 for x in cols],
bbox=[-0.15, -0.40, 0.1, yend], cellLoc='left')
#cellDict = tab.get_celld()
#for i in range(0,len(cols)):
# cellDict[(0,i)].set_height(.02)
# for j in range(1,len(ids)+1):
# cellDict[(j,i)].set_height(.02)
tab.set_fontsize(9)
#---
if save_plot:
fout += self.file_extention
pl.savefig(fout, dpi=self.dpi)
if self.verbose:
logger.info("---> done saving plot: " +fout)
else:
fout= "no plot saved"
#---
if show_plot:
pl.show()
else:
pl.close()
return fout
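    # Hedged illustration (not part of the original module): the ``evt`` dict
    # expected by plot_evoked, assuming epochs/events were produced elsewhere
    # with mne; the file name and plot directory below are placeholders.
    #
    #   evt = {"epochs": epochs,                        # mne.Epochs instance
    #          "events": epochs.events,                 # from mne.find_events
    #          "event_id": None,
    #          "baseline_corrected": False,
    #          "bc": {"events": np.array([]), "event_id": None}}
    #   jumeg_epocher_plot.plot_evoked(evt, fname="subject01-raw.fif",
    #                                  condition="demo", plot_dir="plots",
    #                                  save_plot=True, show_plot=False)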
jumeg_epocher_plot = JuMEG_Epocher_Plot() | bsd-3-clause |
kmather73/ggplot | ggplot/stats/stat_function.py | 12 | 4439 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_function(stat):
"""
Superimpose a function onto a plot
    Uses a user-supplied function to compute y values over an evenly spaced grid of x values.
Parameters
----------
x : list, 1darray
x values of data
fun : function
Function to draw.
n : int
Number of points to interpolate over. Must be greater than zero.
Defaults to 101.
color : str
Color to draw function with.
args : list, dict, object
List or dict of additional arguments to pass to function. If neither
list or dict, object is passed as second argument.
Examples
--------
Sin vs cos.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
gg = ggplot(pd.DataFrame({'x':np.arange(10)}),aes(x='x'))
gg = gg + stat_function(fun=np.sin,color="red")
gg = gg + stat_function(fun=np.cos,color="blue")
print(gg)
Compare random sample density to normal distribution.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.normal(size=100)
# normal distribution function
def dnorm(n):
return (1.0 / np.sqrt(2 * np.pi)) * (np.e ** (-0.5 * (n ** 2)))
data = pd.DataFrame({'x':x})
gg = ggplot(aes(x='x'),data=data) + geom_density()
gg = gg + stat_function(fun=dnorm,n=150)
print(gg)
Passing additional arguments to function as list.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.randn(100)
to_the_power_of = lambda n, p: n ** p
y = x ** 3
y += np.random.randn(100) # add noise
data = pd.DataFrame({'x':x,'y':y})
gg = ggplot(aes(x='x',y='y'),data=data) + geom_point()
gg = gg + stat_function(fun=to_the_power_of,args=[3])
print(gg)
Passing additional arguments to function as dict.
.. plot::
:include-source:
import scipy
import numpy as np
import pandas as pd
from ggplot import *
def dnorm(x, mean, var):
return scipy.stats.norm(mean,var).pdf(x)
data = pd.DataFrame({'x':np.arange(-5,6)})
gg = ggplot(aes(x='x'),data=data)
gg = gg + stat_function(fun=dnorm,color="blue",args={'mean':0.0,'var':0.2})
gg = gg + stat_function(fun=dnorm,color="red",args={'mean':0.0,'var':1.0})
gg = gg + stat_function(fun=dnorm,color="yellow",args={'mean':0.0,'var':5.0})
gg = gg + stat_function(fun=dnorm,color="green",args={'mean':-2.0,'var':0.5})
print(gg)
"""
# TODO: Should not have a required aesthetic, use the scale information
    # maybe that is where the "scale training" helps
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'path', 'position': 'identity', 'fun': None,
'n': 101, 'args': None}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
fun = self.params['fun']
n = self.params['n']
args = self.params['args']
if not hasattr(fun, '__call__'):
raise GgplotError("stat_function requires parameter 'fun' to be " +
"a function or any other callable object")
old_fun = fun
if isinstance(args,list):
fun = lambda x: old_fun(x, *args)
elif isinstance(args,dict):
fun = lambda x: old_fun(x, **args)
elif args is not None:
fun = lambda x: old_fun(x, args)
else:
fun = lambda x: old_fun(x)
x = np.linspace(x.min(), x.max(),n)
y = list(map(fun, x))
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
# Don't copy the any previous 'y' assignments
try:
del data['y']
except KeyError:
pass
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
| bsd-2-clause |
villalonreina/dipy | setup_helpers.py | 11 | 14073 | ''' Distutils / setuptools helpers
'''
import os
import sys
from os.path import join as pjoin, split as psplit, splitext, dirname, exists
import tempfile
import shutil
from distutils.version import LooseVersion
from distutils.command.install_scripts import install_scripts
from distutils.errors import CompileError, LinkError
from distutils import log
BAT_TEMPLATE = \
r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
REM quote exe in case of spaces in path name
set py_exe="%py_exe%"
call %py_exe% %pyscript% %*
"""
# Path of file to which to write C conditional vars from build-time checks
CONFIG_H = pjoin('build', 'config.h')
# File name (no directory) to which to write Python vars from build-time checks
CONFIG_PY = '__config__.py'
# Directory to which to write libraries for building
LIB_DIR_TMP = pjoin('build', 'extra_libs')
class install_scripts_bat(install_scripts):
""" Make scripts executable on Windows
Scripts are bare file names without extension on Unix, fitting (for example)
Debian rules. They identify as python scripts with the usual ``#!`` first
line. Unix recognizes and uses this first "shebang" line, but Windows does
not. So, on Windows only we add a ``.bat`` wrapper of name
``bare_script_name.bat`` to call ``bare_script_name`` using the python
interpreter from the #! first line of the script.
Notes
-----
See discussion at
http://matthew-brett.github.com/pydagogue/installing_scripts.html and
example at git://github.com/matthew-brett/myscripter.git for more
background.
"""
def run(self):
install_scripts.run(self)
if not os.name == "nt":
return
for filepath in self.get_outputs():
# If we can find an executable name in the #! top line of the script
# file, make .bat wrapper for script.
with open(filepath, 'rt') as fobj:
first_line = fobj.readline()
if not (first_line.startswith('#!') and
'python' in first_line.lower()):
log.info("No #!python executable found, skipping .bat "
"wrapper")
continue
pth, fname = psplit(filepath)
froot, ext = splitext(fname)
bat_file = pjoin(pth, froot + '.bat')
bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)
log.info("Making %s wrapper for %s" % (bat_file, filepath))
if self.dry_run:
continue
with open(bat_file, 'wt') as fobj:
fobj.write(bat_contents)
def add_flag_checking(build_ext_class, flag_defines, top_package_dir=''):
""" Override input `build_ext_class` to check compiler `flag_defines`
Parameters
----------
build_ext_class : class
Class implementing ``distutils.command.build_ext.build_ext`` interface,
with a ``build_extensions`` method.
flag_defines : sequence
A sequence of elements, where the elements are sequences of length 4
consisting of (``compile_flags``, ``link_flags``, ``code``,
``defvar``). ``compile_flags`` is a sequence of compiler flags;
``link_flags`` is a sequence of linker flags. We
check ``compile_flags`` to see whether a C source string ``code`` will
compile, and ``link_flags`` to see whether the resulting object file
will link. If both compile and link works, we add ``compile_flags`` to
``extra_compile_args`` and ``link_flags`` to ``extra_link_args`` of
each extension when we build the extensions. If ``defvar`` is not
None, it is the name of C variable to be defined in ``build/config.h``
with 1 if the combination of (``compile_flags``, ``link_flags``,
``code``) will compile and link, 0 otherwise. If None, do not write
variable.
top_package_dir : str
String giving name of top-level package, for writing Python file
containing configuration variables. If empty, do not write this file.
Variables written are the same as the Cython variables generated via
the `flag_defines` setting.
Returns
-------
checker_class : class
A class with similar interface to
``distutils.command.build_ext.build_ext``, that adds all working
``compile_flags`` values to the ``extra_compile_args`` and working
``link_flags`` to ``extra_link_args`` attributes of extensions, before
compiling.
"""
class Checker(build_ext_class):
flag_defs = tuple(flag_defines)
def can_compile_link(self, compile_flags, link_flags, code):
cc = self.compiler
fname = 'test.c'
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
with open(fname, 'wt') as fobj:
fobj.write(code)
try:
objects = cc.compile([fname],
extra_postargs=compile_flags)
except CompileError:
return False
try:
# Link shared lib rather then executable to avoid
# http://bugs.python.org/issue4431 with MSVC 10+
cc.link_shared_lib(objects, "testlib",
extra_postargs=link_flags)
except (LinkError, TypeError):
return False
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
return True
def build_extensions(self):
""" Hook into extension building to check compiler flags """
def_vars = []
good_compile_flags = []
good_link_flags = []
config_dir = dirname(CONFIG_H)
for compile_flags, link_flags, code, def_var in self.flag_defs:
compile_flags = list(compile_flags)
link_flags = list(link_flags)
flags_good = self.can_compile_link(compile_flags,
link_flags,
code)
if def_var:
def_vars.append((def_var, flags_good))
if flags_good:
good_compile_flags += compile_flags
good_link_flags += link_flags
else:
log.warn("Flags {0} omitted because of compile or link "
"error".format(compile_flags + link_flags))
if def_vars: # write config.h file
if not exists(config_dir):
self.mkpath(config_dir)
with open(CONFIG_H, 'wt') as fobj:
fobj.write('/* Automatically generated; do not edit\n')
fobj.write(' C defines from build-time checks */\n')
for v_name, v_value in def_vars:
fobj.write('int {0} = {1};\n'.format(
v_name, 1 if v_value else 0))
if def_vars and top_package_dir: # write __config__.py file
config_py_dir = (top_package_dir if self.inplace else
pjoin(self.build_lib, top_package_dir))
if not exists(config_py_dir):
self.mkpath(config_py_dir)
config_py = pjoin(config_py_dir, CONFIG_PY)
with open(config_py, 'wt') as fobj:
fobj.write('# Automatically generated; do not edit\n')
fobj.write('# Variables from compile checks\n')
for v_name, v_value in def_vars:
fobj.write('{0} = {1}\n'.format(v_name, v_value))
if def_vars or good_compile_flags or good_link_flags:
for ext in self.extensions:
ext.extra_compile_args += good_compile_flags
ext.extra_link_args += good_link_flags
if def_vars:
ext.include_dirs.append(config_dir)
build_ext_class.build_extensions(self)
return Checker
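# Hedged usage sketch (not part of the original module): wrapping distutils'
# build_ext with a single compile/link check. The OpenMP flag and test program
# are illustrative assumptions, not this project's actual configuration.
#
#   from distutils.command.build_ext import build_ext
#   omp_test_code = ("#include <omp.h>\n"
#                    "int main(void) { return omp_get_max_threads(); }\n")
#   flag_defs = [[['-fopenmp'], ['-fopenmp'], omp_test_code, 'HAVE_OPENMP']]
#   CheckedBuildExt = add_flag_checking(build_ext, flag_defs,
#                                       top_package_dir='mypkg')
#   # setup(..., cmdclass={'build_ext': CheckedBuildExt})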
def get_pkg_version(pkg_name):
""" Return package version for `pkg_name` if installed
Returns
-------
pkg_version : str or None
Return None if package not importable. Return 'unknown' if standard
``__version__`` string not present. Otherwise return version string.
"""
try:
pkg = __import__(pkg_name)
except ImportError:
return None
try:
return pkg.__version__
except AttributeError:
return 'unknown'
def version_error_msg(pkg_name, found_ver, min_ver):
""" Return informative error message for version or None
"""
if found_ver is None:
return 'We need package {0}, but not importable'.format(pkg_name)
if found_ver == 'unknown':
return 'We need {0} version {1}, but cannot get version'.format(
pkg_name, min_ver)
if LooseVersion(found_ver) >= LooseVersion(min_ver):
return None
return 'We need {0} version {1}, but found version {2}'.format(
pkg_name, found_ver, min_ver)
class SetupDependency(object):
""" SetupDependency class
Parameters
----------
import_name : str
Name with which required package should be ``import``ed.
min_ver : str
Distutils version string giving minimum version for package.
req_type : {'install_requires', 'setup_requires'}, optional
Setuptools dependency type.
heavy : {False, True}, optional
If True, and package is already installed (importable), then do not add
to the setuptools dependency lists. This prevents setuptools
reinstalling big packages when the package was installed without using
setuptools, or this is an upgrade, and we want to avoid the pip default
behavior of upgrading all dependencies.
install_name : str, optional
Name identifying package to install from pypi etc, if different from
`import_name`.
"""
def __init__(self, import_name,
min_ver,
req_type='install_requires',
heavy=False,
install_name=None):
self.import_name = import_name
self.min_ver = min_ver
self.req_type = req_type
self.heavy = heavy
self.install_name = (import_name if install_name is None
else install_name)
def check_fill(self, setuptools_kwargs):
""" Process this dependency, maybe filling `setuptools_kwargs`
Run checks on this dependency. If not using setuptools, then raise
error for unmet dependencies. If using setuptools, add missing or
not-heavy dependencies to `setuptools_kwargs`.
A heavy dependency is one that is inconvenient to install
automatically, such as numpy or (particularly) scipy, matplotlib.
Parameters
----------
setuptools_kwargs : dict
Dictionary of setuptools keyword arguments that may be modified
in-place while checking dependencies.
"""
found_ver = get_pkg_version(self.import_name)
ver_err_msg = version_error_msg(self.import_name,
found_ver,
self.min_ver)
if not 'setuptools' in sys.modules:
# Not using setuptools; raise error for any unmet dependencies
if ver_err_msg is not None:
raise RuntimeError(ver_err_msg)
return
# Using setuptools; add packages to given section of
# setup/install_requires, unless it's a heavy dependency for which we
# already have an acceptable importable version.
if self.heavy and ver_err_msg is None:
return
new_req = '{0}>={1}'.format(self.import_name, self.min_ver)
old_reqs = setuptools_kwargs.get(self.req_type, [])
setuptools_kwargs[self.req_type] = old_reqs + [new_req]
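# Hedged usage sketch (not part of the original module): declaring dependencies
# in a setup.py and letting them fill the setuptools keyword arguments; the
# package names and version numbers below are illustrative.
#
#   setuptools_kwargs = {}
#   for dep in (SetupDependency('numpy', '1.7.1', heavy=True),
#               SetupDependency('nose', '1.0', req_type='setup_requires')):
#       dep.check_fill(setuptools_kwargs)
#   # setup(..., **setuptools_kwargs)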
class Bunch(object):
def __init__(self, vars):
for key, name in vars.items():
if key.startswith('__'):
continue
self.__dict__[key] = name
def read_vars_from(ver_file):
""" Read variables from Python text file
Parameters
----------
ver_file : str
Filename of file to read
Returns
-------
info_vars : Bunch instance
Bunch object where variables read from `ver_file` appear as
attributes
"""
    # Use exec for compatibility with Python 3
ns = {}
with open(ver_file, 'rt') as fobj:
exec(fobj.read(), ns)
return Bunch(ns)
def make_np_ext_builder(build_ext_class):
""" Override input `build_ext_class` to add numpy includes to extension
This is useful to delay call of ``np.get_include`` until the extension is
being built.
Parameters
----------
build_ext_class : class
Class implementing ``distutils.command.build_ext.build_ext`` interface,
with a ``build_extensions`` method.
Returns
-------
np_build_ext_class : class
A class with similar interface to
``distutils.command.build_ext.build_ext``, that adds libraries in
``np.get_include()`` to include directories of extension.
"""
class NpExtBuilder(build_ext_class):
def build_extensions(self):
""" Hook into extension building to add np include dirs
"""
# Delay numpy import until last moment
import numpy as np
for ext in self.extensions:
ext.include_dirs.append(np.get_include())
build_ext_class.build_extensions(self)
return NpExtBuilder
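# Hedged sketch (not part of the original module): the helpers above are meant
# to compose in a setup.py; ``flag_defs`` below refers to a flag-definition
# list as described in add_flag_checking and is an assumed name.
#
#   from distutils.command.build_ext import build_ext
#   builder = make_np_ext_builder(add_flag_checking(build_ext, flag_defs))
#   # setup(..., cmdclass={'build_ext': builder})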
| bsd-3-clause |
samzhang111/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated precision correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
calebfoss/tensorflow | tensorflow/examples/learn/wide_n_deep_tutorial.py | 24 | 8941 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from six.moves import urllib
import pandas as pd
import tensorflow as tf
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
# Sparse base columns.
gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
keys=["female", "male"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
# Transformations.
age_buckets = tf.contrib.layers.bucketized_column(age,
boundaries=[
18, 25, 30, 35, 40, 45,
50, 55, 60, 65
])
# Wide columns and deep columns.
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column(
[age_buckets, education, occupation],
hash_bucket_size=int(1e6)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country,
dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
if model_type == "wide":
m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
feature_columns=wide_columns)
elif model_type == "deep":
m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(df):
"""Input builder function."""
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
df_train = pd.read_csv(
tf.gfile.Open(train_file_name),
names=COLUMNS,
skipinitialspace=True,
engine="python")
df_test = pd.read_csv(
tf.gfile.Open(test_file_name),
names=COLUMNS,
skipinitialspace=True,
skiprows=1,
engine="python")
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
print("model directory = %s" % model_dir)
m = build_estimator(model_dir, model_type)
m.fit(input_fn=lambda: input_fn(df_train), steps=train_steps)
results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=200,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
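# Example invocation (the script name and flag values here are illustrative;
# the census data is downloaded automatically when --train_data/--test_data
# are left empty):
#   python wide_n_deep_tutorial.py --model_type=wide_n_deep --train_steps=200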
| apache-2.0 |
v4hn/moveit | moveit_ros/benchmarks/scripts/moveit_benchmark_statistics.py | 2 | 25524 | #!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll, Ioan Sucan, Luis G. Torres
from sys import argv, exit
from os.path import basename, splitext
import sqlite3
import datetime
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from math import floor
from optparse import OptionParser, OptionGroup
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens) :
start_pos = filevar.tell()
tokens = filevar.readline().split()
for token_index in expected_tokens:
if not tokens[token_index] == expected_tokens[token_index]:
# undo the read, if we failed to parse.
filevar.seek(start_pos)
return None
return tokens[desired_token_index]
def readOptionalLogValue(filevar, desired_token_index, expected_tokens = {}) :
return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens = {}) :
result = readLogValue(filevar, desired_token_index, expected_tokens)
if result == None:
raise Exception("Unable to read " + name)
return result
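# Example of the expected_tokens convention (taken from the calls further
# below): for a log line like "Running on <hostname>", the hostname is read
# with
#   readRequiredLogValue("hostname", logfile, -1, {0: "Running"})
# i.e. token 0 must equal "Running" and the last token is returned.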
def ensurePrefix(line, prefix):
if not line.startswith(prefix):
raise Exception("Expected prefix " + prefix + " was not found")
return line
def readOptionalMultilineValue(filevar):
start_pos = filevar.tell()
line = filevar.readline()
if not line.startswith("<<<|"):
filevar.seek(start_pos)
return None
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:
raise Exception("Expected token |>>> missing")
return value
def readRequiredMultilineValue(filevar):
ensurePrefix(filevar.readline(), "<<<|")
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:
raise Exception("Expected token |>>> missing")
return value
def readBenchmarkLog(dbname, filenames):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
# create all tables if they don't already exist
c.executescript("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
date DATETIME, seed INTEGER, setup TEXT);
CREATE TABLE IF NOT EXISTS plannerConfigs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(512) NOT NULL, settings TEXT);
CREATE TABLE IF NOT EXISTS enums
(name VARCHAR(512), value INTEGER, description TEXT,
PRIMARY KEY (name, value));
CREATE TABLE IF NOT EXISTS runs
(id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS progress
(runid INTEGER, time REAL, PRIMARY KEY (runid, time),
FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
for filename in filenames:
print('Processing ' + filename)
logfile = open(filename,'r')
start_pos = logfile.tell()
libname = readOptionalLogValue(logfile, 0, {1 : "version"})
if libname == None:
libname = "OMPL"
logfile.seek(start_pos)
version = readOptionalLogValue(logfile, -1, {1 : "version"})
if version == None:
# set the version number to make Planner Arena happy
version = "0.0.0"
version = ' '.join([libname, version])
expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
expsetup = readRequiredMultilineValue(logfile)
cpuinfo = readOptionalMultilineValue(logfile)
rseed = int(readRequiredLogValue("random seed", logfile, 0, {-2 : "random", -1 : "seed"}))
timelimit = float(readRequiredLogValue("time limit", logfile, 0, {-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, {-3 : "MB", -2 : "per", -1 : "run"}))
nrrunsOrNone = readOptionalLogValue(logfile, 0, {-3 : "runs", -2 : "per", -1 : "planner"})
nrruns = -1
if nrrunsOrNone != None:
nrruns = int(nrrunsOrNone)
totaltime = float(readRequiredLogValue("total time", logfile, 0, {-3 : "collect", -2 : "the", -1 : "data"}))
numEnums = 0
numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
if numEnumsOrNone != None:
numEnums = int(numEnumsOrNone)
for i in range(numEnums):
enum = logfile.readline()[:-1].split('|')
c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
if c.fetchone() == None:
for j in range(len(enum)-1):
c.execute('INSERT INTO enums VALUES (?,?,?)',
(enum[0],j,enum[j+1]))
c.execute('INSERT INTO experiments VALUES (?,?,?,?,?,?,?,?,?,?,?,?)',
(None, expname, totaltime, timelimit, memorylimit, nrruns,
version, hostname, cpuinfo, date, rseed, expsetup) )
experimentId = c.lastrowid
numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
for i in range(numPlanners):
plannerName = logfile.readline()[:-1]
print('Parsing data for ' + plannerName)
# read common data for planner
numCommon = int(logfile.readline().split()[0])
settings = ''
for j in range(numCommon):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)',
(plannerName, settings,))
p = c.fetchone()
if p==None:
c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)',
(None, plannerName, settings,))
plannerId = c.lastrowid
else:
plannerId = p[0]
# get current column names
c.execute('PRAGMA table_info(runs)')
columnNames = [col[1] for col in c.fetchall()]
# read properties and add columns as necessary
numProperties = int(logfile.readline().split()[0])
propertyNames = ['experimentid', 'plannerid']
for j in range(numProperties):
field = logfile.readline().split()
propertyType = field[-1]
propertyName = '_'.join(field[:-1])
if propertyName not in columnNames:
c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
propertyNames.append(propertyName)
# read measurements
insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
runIds = []
for j in range(numRuns):
values = tuple([experimentId, plannerId] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in logfile.readline().split('; ')[:-1]])
c.execute(insertFmtStr, values)
# extract primary key of each run row so we can reference them
# in the planner progress data table if needed
runIds.append(c.lastrowid)
nextLine = logfile.readline().strip()
# read planner progress data if it's supplied
if nextLine != '.':
# get current column names
c.execute('PRAGMA table_info(progress)')
columnNames = [col[1] for col in c.fetchall()]
                # read progress properties and add columns as necessary
numProgressProperties = int(nextLine.split()[0])
progressPropertyNames = ['runid']
for i in range(numProgressProperties):
field = logfile.readline().split()
progressPropertyType = field[-1]
progressPropertyName = "_".join(field[:-1])
if progressPropertyName not in columnNames:
c.execute('ALTER TABLE progress ADD %s %s' %
(progressPropertyName, progressPropertyType))
progressPropertyNames.append(progressPropertyName)
# read progress measurements
insertFmtStr = 'INSERT INTO progress (' + \
','.join(progressPropertyNames) + ') VALUES (' + \
','.join('?'*len(progressPropertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
for j in range(numRuns):
dataSeries = logfile.readline().split(';')[:-1]
for dataSample in dataSeries:
values = tuple([runIds[j]] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in dataSample.split(',')[:-1]])
try:
c.execute(insertFmtStr, values)
except sqlite3.IntegrityError:
print('Ignoring duplicate progress data. Consider increasing ompl::tools::Benchmark::Request::timeBetweenUpdates.')
pass
logfile.readline()
logfile.close()
conn.commit()
c.close()
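# Minimal usage sketch (file names are hypothetical): parse two benchmark logs
# into (or append to) a sqlite3 database.
#   readBenchmarkLog('benchmark.db', ['run1.log', 'run2.log'])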
def plotAttribute(cur, planners, attribute, typename):
"""Create a plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
labels = []
measurements = []
nanCounts = []
if typename == 'ENUM':
cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
descriptions = [ t[0] for t in cur.fetchall() ]
numValues = len(descriptions)
for planner in planners:
cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
% (attribute, planner[0], attribute))
measurement = [ t[0] for t in cur.fetchall() if t[0] != None ]
if len(measurement) > 0:
cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
% (planner[0], attribute))
nanCounts.append(cur.fetchone()[0])
labels.append(planner[1])
if typename == 'ENUM':
scale = 100. / len(measurement)
measurements.append([measurement.count(i)*scale for i in range(numValues)])
else:
measurements.append(measurement)
if len(measurements)==0:
print('Skipping "%s": no available measurements' % attribute)
return
plt.clf()
ax = plt.gca()
if typename == 'ENUM':
width = .5
measurements = np.transpose(np.vstack(measurements))
colsum = np.sum(measurements, axis=1)
rows = np.where(colsum != 0)[0]
heights = np.zeros((1,measurements.shape[1]))
ind = range(measurements.shape[1])
legend_labels = []
for i in rows:
plt.bar(ind, measurements[i], width, bottom=heights[0],
color=matplotlib.cm.hot(int(floor(i*256/numValues))),
label=descriptions[i])
heights = heights + measurements[i]
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
props = matplotlib.font_manager.FontProperties()
props.set_size('small')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop = props)
elif typename == 'BOOLEAN':
width = .5
measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurementsPercentage, width)
xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30, fontsize=8)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels
else:
if int(matplotlibversion.split('.')[0])<1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_',' '))
#xtickNames = plt.xticks(labels, rotation=30, fontsize=10)
#plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels
xtickNames = plt.setp(ax,xticklabels=labels)
plt.setp(xtickNames, rotation=30)
for tick in ax.xaxis.get_major_ticks(): # shrink the font size of the x tick labels
tick.label.set_fontsize(8)
plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nanCounts)>0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i+width/2 if typename=='BOOLEAN' else i+1
ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
plt.show()
def plotProgressAttribute(cur, planners, attribute):
"""Plot data for a single planner progress attribute. Will create an
average time-plot with error bars of the attribute over all runs for
each planner."""
import numpy.ma as ma
plt.clf()
ax = plt.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel(attribute.replace('_',' '))
plannerNames = []
for planner in planners:
cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
ON progress.runid = runs.id AND runs.plannerid=%s
AND progress.%s IS NOT NULL""" \
% (attribute, planner[0], attribute))
if cur.fetchone()[0] > 0:
plannerNames.append(planner[1])
cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
runids = [t[0] for t in cur.fetchall()]
timeTable = []
dataTable = []
for r in runids:
# Select data for given run
cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % (attribute,r))
(time, data) = zip(*(cur.fetchall()))
timeTable.append(time)
dataTable.append(data)
# It's conceivable that the sampling process may have
# generated more samples for one run than another; in this
            # case, truncate all data series to the length of the
            # shortest one.
fewestSamples = min(len(time[:]) for time in timeTable)
times = np.array(timeTable[0][:fewestSamples])
dataArrays = np.array([data[:fewestSamples] for data in dataTable])
filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
means = np.mean(filteredData, axis=0)
stddevs = np.std(filteredData, axis=0, ddof=1)
# plot average with error bars
plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
ax.legend(plannerNames)
if len(plannerNames)>0:
plt.show()
else:
plt.clf()
def plotStatistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plots...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('SELECT id, name FROM plannerConfigs')
planners = [(t[0],t[1].replace('geometric_','').replace('control_',''))
for t in c.fetchall()]
c.execute('PRAGMA table_info(runs)')
colInfo = c.fetchall()[3:]
pp = PdfPages(fname)
for col in colInfo:
if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
col[2] == 'INTEGER' or col[2] == 'REAL':
plotAttribute(c, planners, col[1], col[2])
pp.savefig(plt.gcf())
c.execute('PRAGMA table_info(progress)')
colInfo = c.fetchall()[2:]
for col in colInfo:
plotProgressAttribute(c, planners, col[1])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
experiments = c.fetchall()
for experiment in experiments:
c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
GROUP BY runs.plannerid""" % experiment[0])
numRuns = [run[0] for run in c.fetchall()]
        numRuns = numRuns[0] if len(set(numRuns)) == 1 else ','.join(str(n) for n in numRuns)
plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
        plt.figtext(pagex, pagey-0.05, 'Number of averaged runs: %s' % numRuns)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
pagey -= 0.22
plt.show()
pp.savefig(plt.gcf())
pp.close()
def saveAsMysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump,'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
c.close()
last = ['experiments', 'planner_configs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION','COMMIT',
'sqlite_sequence','CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line: break
else:
process = True
if not process: continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
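# Usage sketch (paths are hypothetical): write a MySQL-compatible dump of the
# sqlite3 results, which can then be loaded with `mysql dbname < benchmark.sql`.
#   saveAsMysql('benchmark.db', 'benchmark.sql')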
def computeViews(dbname):
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('PRAGMA table_info(runs)')
    # kinodynamic paths cannot be simplified (or at least not easily),
# so simplification_time may not exist as a database column
if 'simplification_time' in [col[1] for col in c.fetchall()]:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
else:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid, experimentid""" % s0
s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid""" % s0
s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
conn.commit()
c.close()
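# The views created above can be inspected directly, e.g. (illustrative query):
#   sqlite3 benchmark.db "SELECT * FROM bestPlannerConfigs LIMIT 5;"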
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser("A script to parse benchmarking results.\n" + usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db",
help="Filename of benchmark database [default: %default]")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False,
help="Compute the views for best planner configurations")
parser.add_option("-p", "--plot", dest="plot", default=None,
help="Create a PDF of plots with the filename provided")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None,
help="Save SQLite3 database as a MySQL dump file")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("No arguments were provided. Please provide full path of log file")
if len(args) == 1:
readBenchmarkLog(options.dbname, args)
# If we update the database, we recompute the views as well
options.view = True
if options.view:
computeViews(options.dbname)
if options.plot:
plotStatistics(options.dbname, options.plot)
if options.mysqldb:
saveAsMysql(options.dbname, options.mysqldb)
| bsd-3-clause |
uclmr/inferbeddings | data/kinships/make_folds.py | 2 | 2304 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
import numpy as np
from sklearn.cross_validation import KFold, train_test_split
def read_triples(path):
with open(path, 'rt') as f:
lines = f.readlines()
triples = [(s.strip(), p.strip(), o.strip()) for [s, p, o] in [l.split() for l in lines]]
return triples
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('K-Folder for Knowledge Graphs', formatter_class=formatter)
argparser.add_argument('triples', action='store', type=str, default=None)
args = argparser.parse_args(argv)
triples_path = args.triples
triples = read_triples(triples_path)
nb_triples = len(triples)
kf = KFold(n=nb_triples, n_folds=10, random_state=0, shuffle=True)
triples_np = np.array(triples)
for fold_no, (train_idx, test_idx) in enumerate(kf):
train_valid_triples = triples_np[train_idx]
test_triples = triples_np[test_idx]
train_triples, valid_triples, _, _ = train_test_split(train_valid_triples,
np.ones(train_valid_triples.shape[0]),
test_size=len(test_triples), random_state=0)
train_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in train_triples]
valid_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in valid_triples]
test_lines = ['{}\t{}\t{}'.format(s, p, o) for [s, p, o] in test_triples]
if not os.path.exists('folds/{}'.format(str(fold_no))):
os.mkdir('folds/{}'.format(str(fold_no)))
with open('folds/{}/nations_train.tsv'.format(str(fold_no)), 'w') as f:
f.writelines(['{}\n'.format(line) for line in train_lines])
with open('folds/{}/nations_valid.tsv'.format(str(fold_no)), 'w') as f:
f.writelines(['{}\n'.format(line) for line in valid_lines])
with open('folds/{}/nations_test.tsv'.format(str(fold_no)), 'w') as f:
f.writelines(['{}\n'.format(line) for line in test_lines])
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
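# Example invocation (the path is hypothetical; assumes an existing folds/
# directory in the working directory):
#   python make_folds.py data/kinships/triples.tsv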
| mit |
phoebe-project/phoebe2-docs | 2.1/examples/diher_misaligned.py | 1 | 4328 | #!/usr/bin/env python
# coding: utf-8
# DI Her: Misaligned Binary
# ============================
#
# In this example, we'll reproduce Figure 8 in the misalignment release paper ([Horvat et al. 2018](http://phoebe-project.org/publications/2018Horvat+)).
#
# <img src="horvat+18_fig8.png" alt="Figure 8" width="400px"/>
#
# Setup
# -----------------------------
#
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# System Parameters
# ------------
#
# We'll adopt and set parameters from the following sources:
# * Albrecht et al. (2009), Nature: https://arxiv.org/pdf/0909.2861
# * https://en.wikipedia.org/wiki/DI_Herculis
# * Claret et al (2010) https://arxiv.org/pdf/1002.2949.pdf
# In[3]:
Nt = 2000
b.set_value('t0_supconj@orbit', 2442233.3481)
b.set_value('vgamma@system', 9.1) # [km/s] (Albrecht et al. 2009)
b.set_value('ntriangles@primary', Nt)
b.set_value('ntriangles@secondary', Nt)
mass1 = 5.1 # [M_sun] (Albrecht et al. 2009)
mass2 = 4.4 # [M_sun] (Albrecht et al. 2009)
P = 10.550164 # [d] (Albrecht et al. 2009)
mu_sun = 1.32712440018e20 # = G M_sun [m3 s^-2], Wiki Standard_gravitational_parameter
R_sun = 695700000 # [m] Wiki Sun
sma = (mu_sun*(mass1 + mass2)*(P*86400/(2*np.pi))**2)**(1./3)/R_sun # Kepler equation
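# Sanity check (an added note, not from the original notebook): with the masses
# and period adopted above, the Kepler relation evaluates to sma ~ 43 R_sun.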
incl = 89.3 # deg (Albrecht et al. 2009)
vp_sini = 109 # [km/s] (Albrecht et al. 2009)
vs_sini = 117 # [km/s] (Albrecht et al. 2009)
Rp = 2.68 # [R_sun] (Albrecht et al. 2009)
Rs = 2.48 # [R_sun] (Albrecht et al. 2009)
sini = np.sin(np.pi*incl/180)
vp = vp_sini*86400/sini # [km/s]
vs = vs_sini*86400/sini # [km/s]
Pp = 2*np.pi*Rp*R_sun/1000/vp
Ps = 2*np.pi*Rs*R_sun/1000/vs
Fp = P/Pp
Fs = P/Ps
b.set_value('q', mass2/mass1)
b.set_value('incl@binary', incl) # (Albrecht et al. 2009)
b.set_value('sma@binary', sma) # calculated
b.set_value('ecc@binary', 0.489) # (Albrecht et al. 2009)
b.set_value('per0@binary', 330.2) # (Albrecht et al. 2009)
b.set_value('period@binary', P) # calculated
b.set_value('syncpar@primary', Fp) # calculated
b.set_value('syncpar@secondary', Fs) # calculated
b.set_value('requiv@primary', Rp) # !!! requiv (Albrecht et al. 2009)
b.set_value('requiv@secondary', Rs) # !!! requiv (Albrecht et al. 2009)
b.set_value('teff@primary', 17300) # Wiki DI_Herculis
b.set_value('teff@secondary', 15400) # Wiki DI_Herculis
b.set_value('gravb_bol@primary', 1.)
b.set_value('gravb_bol@secondary', 1.)
# beta = 72 deg (Albrecht et al. 2009)
dOmega_p = 72
di_p = 62 - incl
b.set_value('pitch@primary', di_p) # di
b.set_value('yaw@primary', dOmega_p) # dOmega
# beta = - 84 deg (Albrecht et al. 2009)
dOmega_s = -84
di_s = 100 - incl
b.set_value('pitch@secondary', di_s) # di
b.set_value('yaw@secondary', dOmega_s) # dOmega
b.set_value_all('atm','extern_planckint')
b.set_value_all('irrad_method', 'none')
# Datasets
# ---------------
#
# Let's compute an LC and RV dataset sampled at 200 points in phase (with some aliasing).
# In[4]:
n = 200
times = b.to_time(np.linspace(-0.05, 1.05, n))
b.add_dataset('lc', times=times, dataset='lc01', ld_func='logarithmic', ld_coeffs = [0.5,0.5])
b.add_dataset('rv', times=times, dataset='rv01', ld_func='logarithmic', ld_coeffs = [0.5,0.5])
# Compute
# --------------
# In[5]:
b.run_compute(ltte=False)
# Plotting
# -------------
# In[6]:
afig, mplfig = b.plot(kind='lc', show=True)
# In[7]:
afig, mplfig = b.plot(kind='rv', show=True)
# In[ ]:
| gpl-3.0 |
reshama/data-science-from-scratch | code-python3/linear_algebra.py | 12 | 3566 | # -*- coding: iso-8859-15 -*-
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial, reduce
#
# functions for working with vectors
#
def vector_add(v, w):
"""adds two vectors componentwise"""
return [v_i + w_i for v_i, w_i in zip(v,w)]
def vector_subtract(v, w):
"""subtracts two vectors componentwise"""
return [v_i - w_i for v_i, w_i in zip(v,w)]
def vector_sum(vectors):
return reduce(vector_add, vectors)
def scalar_multiply(c, v):
return [c * v_i for v_i in v]
def vector_mean(vectors):
"""compute the vector whose i-th element is the mean of the
i-th elements of the input vectors"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def magnitude(v):
return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
return math.sqrt(squared_distance(v, w))
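# Quick sanity checks on the vector helpers above (illustrative values only,
# added as an aid and not part of the original chapter):
assert vector_add([1, 2], [3, 4]) == [4, 6]
assert dot([1, 2], [3, 4]) == 11
assert distance([0, 0], [3, 4]) == 5.0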
#
# functions for working with matrices
#
def shape(A):
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def get_row(A, i):
return A[i]
def get_column(A, j):
return [A_i[j] for A_i in A]
def make_matrix(num_rows, num_cols, entry_fn):
"""returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)"""
return [[entry_fn(i, j) for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i, j):
"""1's on the 'diagonal', 0's everywhere else"""
return 1 if i == j else 0
identity_matrix = make_matrix(5, 5, is_diagonal)
# user 0 1 2 3 4 5 6 7 8 9
#
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
#####
# DELETE DOWN
#
def matrix_add(A, B):
if shape(A) != shape(B):
raise ArithmeticError("cannot add matrices with different shapes")
num_rows, num_cols = shape(A)
def entry_fn(i, j): return A[i][j] + B[i][j]
return make_matrix(num_rows, num_cols, entry_fn)
def make_graph_dot_product_as_vector_projection(plt):
v = [2, 1]
w = [math.sqrt(.25), math.sqrt(.75)]
c = dot(v, w)
vonw = scalar_multiply(c, w)
o = [0,0]
plt.arrow(0, 0, v[0], v[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
plt.arrow(0 ,0, w[0], w[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
linestyle='dotted', length_includes_head=True)
plt.scatter(*zip(v,w,o),marker='.')
plt.axis('equal')
plt.show()
| unlicense |
imcgreer/rapala | survey/summer2015/checkfields.py | 2 | 28485 | #!/usr/bin/env python
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import astropy.io.ascii as ascii_io
import fitsio
import bass
import bokextract
datadir = '/global/scratch2/sd/imcgreer/'
ndwfs_starfile = datadir+'ndwfs/starcat.fits'
bootes_sdss_starfile = datadir+'ndwfs/sdss_bootes_gstars.fits'
cfhtlswide_starfile = datadir+'cfhtls/CFHTLSW3_starcat.fits'
cfhtlsdeep_starfile = datadir+'cfhtls/CFHTLSD3_starcat.fits'
def cfhtw3_tiles(observed=True):
w3west,w3east = 15*(13.+50/60.), 15*(14+45./60)
w3south,w3north = 50.7, 56.2
return bass.region_tiles(w3west,w3east,w3south,w3north,observed=observed)
def ndwfs_tiles(observed=True):
ndwest,ndeast = 15*14.37, 15*14.62
ndsouth,ndnorth = 32.5, 36.1
return bass.region_tiles(ndwest,ndeast,ndsouth,ndnorth,observed=observed)
def panstarrs_md_tiles(observed=True):
tiles = {}
for field,ra,dec in [('MD03',130.592,+44.317),
('MD05',161.917,+58.083),
('MD06',185.000,+47.117),
('MD07',213.704,+53.083),
('MD08',242.787,+54.950)]:
dra = 3.5/np.cos(np.radians(dec))
tiles[field] = bass.region_tiles(ra-dra,ra+dra,dec-3.5,dec+3.5,
observed=observed)
return tiles
def check_fields_list():
files = [ t['utDate']+'/'+t['fileName']+'.fits.gz'
for tiles in [cfhtw3_tiles(),ndwfs_tiles()]
for t in tiles ]
with open('checkfields_tiles.txt','w') as f:
f.write('\n'.join(sorted(files)))
def srcor(ra1,dec1,ra2,dec2,sep,return_sep=False):
from astropy.coordinates import SkyCoord,match_coordinates_sky
from astropy import units as u
c1 = SkyCoord(ra1,dec1,unit=(u.degree,u.degree))
c2 = SkyCoord(ra2,dec2,unit=(u.degree,u.degree))
idx,d2d,d3c = match_coordinates_sky(c1,c2)
ii = np.where(d2d.arcsec < sep)[0]
if return_sep:
return ii,idx[ii],d2d.arcsec[ii]
else:
return ii,idx[ii]
def srcorXY(x1,y1,x2,y2,maxrad):
sep = np.sqrt( (x1[:,np.newaxis]-x2[np.newaxis,:])**2 +
(y1[:,np.newaxis]-y2[np.newaxis,:])**2 )
ii = sep.argmin(axis=1)
m1 = np.arange(len(x1))
jj = np.where(sep[m1,ii] < maxrad)[0]
return m1[jj],ii[jj]
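# Hypothetical usage sketch (catalog names/columns are placeholders): match two
# RA/Dec lists within 2.5 arcsec and keep only the paired rows.
#   i1, i2 = srcor(cat1['ra'], cat1['dec'], cat2['ra'], cat2['dec'], 2.5)
#   matched1, matched2 = cat1[i1], cat2[i2]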
def match_objects(objs,tiles):
objpars = [('g_number','f4'),('g_ra','f8'),('g_dec','f8'),
('g_x','f4'),('g_y','f4'),
('g_autoMag','f4'),('g_autoMagErr','f4'),
('g_autoFlux','f4'),('g_autoFluxErr','f4'),
('g_psfMag','f4'),('g_psfMagErr','f4'),
('g_psfFlux','f4'),('g_psfFluxErr','f4'),
('g_elongation','f4'),('g_ellipticity','f4'),
('g_flags','i4'),('g_fluxRad','f4')]
tilepars = [('g_utDate','S8'),('g_expTime','f4'),
('g_tileId','i4'),('g_ditherId','i4'),('g_ccdNum','i4')]
dtype = objs.dtype.descr + objpars + tilepars
skeys = ['NUMBER','ALPHA_J2000','DELTA_J2000','X_IMAGE','Y_IMAGE',
'MAG_AUTO','MAGERR_AUTO','FLUX_AUTO','FLUXERR_AUTO',
'MAG_PSF','MAGERR_PSF','FLUX_PSF','FLUXERR_PSF',
'ELONGATION','ELLIPTICITY',
'FLAGS','FLUX_RADIUS']
tkeys = ['utDate','expTime','tileId','ditherId']
matches = []
for ti,t in enumerate(tiles):
print 'matching tile %d/%d' % (ti+1,len(tiles))
for ccdNum in range(1,5):
catpath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
t['fileName']+'_ccd%d.cat.fits'%ccdNum)
if not os.path.exists(catpath):
print ' ... %s does not exist, skipping' % catpath
continue
cat = fitsio.read(catpath)
ii = np.where( (objs['ra']>cat['ALPHA_J2000'].min()+3e-3) &
(objs['ra']<cat['ALPHA_J2000'].max()-3e-3) &
(objs['dec']>cat['DELTA_J2000'].min()+3e-3) &
(objs['dec']<cat['DELTA_J2000'].max()-3e-3) )[0]
if len(ii)==0:
continue
m1,m2 = srcor(objs['ra'][ii],objs['dec'][ii],
cat['ALPHA_J2000'],cat['DELTA_J2000'],2.5)
print ' ccd%d %d/%d' % (ccdNum,len(m1),len(ii)),
matches.extend( [ tuple(objs[i]) +
tuple([cat[k][j] for k in skeys]) +
tuple([t[k] for k in tkeys]) + (ccdNum,)
for i,j in zip(ii[m1],m2) ] )
uu = np.delete(np.arange(len(ii)),m1)
matches.extend( [ tuple(objs[i]) +
tuple([0]*len(skeys)) +
tuple([t[k] for k in tkeys]) + (ccdNum,)
for i in ii[uu] ] )
print
matches = np.array(matches,dtype=dtype)
print 'finished with ',matches.size
return matches
def depth_plots(matches,g_ref,gname,bypriority=True,aper='psf',**kwargs):
assert aper in ['psf','auto']
fluxk = 'g_%sFlux' % aper
errk = 'g_%sFluxErr' % aper
#
m = np.where( (matches[fluxk]>0) & (matches[errk]>0) )[0]
gSNR = matches[fluxk][m] / matches[errk][m]
if bypriority:
fig1 = plt.figure(figsize=(10,8))
plt.subplots_adjust(0.07,0.07,0.97,0.96,0.27,0.27)
else:
fig1 = plt.figure(figsize=(5,4.5))
plt.subplots_adjust(0.13,0.12,0.98,0.94)
for i in range(4):
if bypriority:
ax = plt.subplot(2,2,i+1)
else:
if i>0: break
ax = plt.subplot(1,1,i+1)
if i==0:
ii = np.where(matches['g_ditherId'][m] > 0)[0]
else:
ii = np.where(matches['g_ditherId'][m] == i)[0]
ax.hexbin(g_ref[m[ii]],np.log10(gSNR[ii]),
bins='log',cmap=plt.cm.Blues)
ax.axhline(np.log10(5.0),c='r',lw=1.3,alpha=0.7)
ax.plot([24.0-2.5*np.log10(np.sqrt(3))]*2,np.log10([3,8]),c='m',lw=1.5)
ax.set_xlim(17.2,24.5)
ax.set_ylim(np.log10(2),np.log10(500))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.2))
ax.yaxis.set_major_locator(ticker.FixedLocator(np.log10(
[2,5,10,20,50,100,200])))
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda x,pos: '%d' % np.round(10**x)))
ax.set_xlabel(gname+' mag')
ax.set_ylabel('BASS %s flux/err' % aper.upper())
if i==0:
ax.set_title('all tiles')
else:
ax.set_title('P%d tiles' % i)
#
mbins = np.arange(18.,24.01,0.1)
fig2 = plt.figure(figsize=(8,4))
plt.subplots_adjust(0.07,0.14,0.97,0.97,0.25)
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
for i in range(4):
if i==0:
ii = np.where(matches['g_ditherId'] > 0)[0]
else:
if not bypriority: break
ii = np.where(matches['g_ditherId'] == i)[0]
jj = np.where(matches[errk][ii]>0)[0]
g5sig = ( matches[fluxk][ii[jj]] / matches[errk][ii[jj]] ) > 5.0
tot,_ = np.histogram(g_ref[ii],mbins)
det,_ = np.histogram(g_ref[ii[jj]],mbins)
det5,_ = np.histogram(g_ref[ii[jj[g5sig]]],mbins)
ax1.plot(mbins[:-1],det.astype(np.float)/tot,drawstyle='steps-pre',
c=['black','blue','green','DarkCyan'][i],lw=1.3,
label=['all','P1','P2','P3'][i])
ax2.plot(mbins[:-1],det5.astype(np.float)/tot,drawstyle='steps-pre',
c=['black','blue','green','DarkCyan'][i],lw=1.3,
label=['all','P1','P2','P3'][i])
ax1.set_xlabel(gname+' mag')
ax2.set_xlabel(gname+' mag')
ax1.set_ylabel('fraction detected')
ax2.set_ylabel('fraction detected 5 sig')
ax1.legend(loc='lower left')
if kwargs.get('saveplots',False):
figname = kwargs.get('figname','blah')
figext = kwargs.get('figtype','png')
fig1.savefig(figname+'_depth.'+figext)
fig2.savefig(figname+'_complete.'+figext)
##############################################################################
# #
# NDWFS #
# #
##############################################################################
def select_ndwfs_stars():
ndwfsdir = '/global/scratch2/sd/imcgreer/ndwfs/DR3/matchedFITS/'
dtype = [('number','i4'),('autoMag','3f4'),('autoMagErr','3f4'),
('ra','f8'),('dec','f8'),('rFWHM','f4'),('rClass','f4')]
starcat = []
rcols = ['NUMBER','MAG_AUTO','MAGERR_AUTO','ALPHA_J2000','DELTA_J2000',
'FWHM_IMAGE','CLASS_STAR']
cols = ['MAG_AUTO','MAGERR_AUTO']
for dec1 in range(32,36):
catfn = lambda b: 'NDWFS_%s_%d_%d_cat_m.fits.gz' % (b,dec1,dec1+1)
rfits = fitsio.FITS(ndwfsdir+catfn('R'))
bfits = fitsio.FITS(ndwfsdir+catfn('Bw'))
ifits = fitsio.FITS(ndwfsdir+catfn('I'))
w = rfits[1].where('FWHM_IMAGE < 7 && MAG_AUTO < 24.0 && FLAGS == 0')
print len(w)
rcat = rfits[1].read(rows=w,columns=rcols)
bcat = bfits[1].read(rows=w,columns=cols)
icat = ifits[1].read(rows=w,columns=cols)
stars = np.empty(len(w),dtype=dtype)
stars['number'] = rcat['NUMBER']
stars['ra'] = rcat['ALPHA_J2000']
stars['dec'] = rcat['DELTA_J2000']
stars['rFWHM'] = rcat['FWHM_IMAGE']
stars['rClass'] = rcat['CLASS_STAR']
for j,cat in enumerate([bcat,rcat,icat]):
stars['autoMag'][:,j] = cat['MAG_AUTO']
stars['autoMagErr'][:,j] = cat['MAGERR_AUTO']
starcat.append(stars)
starcat = np.concatenate(starcat)
fitsio.write(ndwfs_starfile,starcat,clobber=True)
def match_ndwfs_stars(matchRad=2.5):
stars = fitsio.read(ndwfs_starfile)
tiles = ndwfs_tiles(observed=True)
matches = match_objects(stars,tiles)
fitsio.write('ndwfs_match.fits',matches,clobber=True)
def ndwfs_depth(**kwargs):
kwargs.setdefault('figname','ndwfs')
ndwfsm = fitsio.read('ndwfs_match.fits')
Bw = ndwfsm['autoMag'][:,0]
Bw_minus_R = ndwfsm['autoMag'][:,0] - ndwfsm['autoMag'][:,1]
NDWFSg = np.choose(Bw_minus_R <= 1.45,
[ Bw - (0.23*Bw_minus_R + 0.25),
Bw - (0.38*Bw_minus_R + 0.05) ])
#
m = np.where( np.all(ndwfsm['autoMag'][:,:2]> 0,axis=1) &
np.all(ndwfsm['autoMag'][:,:2]<30,axis=1) )[0]
depth_plots(ndwfsm[m],NDWFSg[m],'NDWFS g-ish',**kwargs)
##############################################################################
# #
# CFHTLS #
# #
##############################################################################
def match_cfhtls_stars(matchRad=2.5,survey='wide'):
if survey=='wide':
stars = fitsio.read(cfhtlswide_starfile)
tiles = cfhtw3_tiles(observed=True)
fname = 'cfhtlswide'
else:
stars = fitsio.read(cfhtlsdeep_starfile)
fname = 'cfhtlsdeep'
matches = match_objects(stars,tiles)
fitsio.write('%s_match.fits'%fname,matches,clobber=True)
def cfhtls_depth(**kwargs):
kwargs.setdefault('figname','cfhtls')
cfhtlsm = fitsio.read('cfhtlswide_match.fits')
m = np.where( (cfhtlsm['psfMag'][:,1]> 0) &
(cfhtlsm['psfMag'][:,1]<30) )[0]
depth_plots(cfhtlsm[m],cfhtlsm['psfMag'][m,1],'CFHTLS g',bypriority=False,
**kwargs)
bok_gain_2015 = [ 1.3325, 1.5225, 1.415, 1.47 ]
bok_rn_2015 = [ 7.94, 9.54, 11.81, 8.91 ]
def cfhtls_depth_compare():
import itertools
import boketc
import bokdepth
tiles = cfhtw3_tiles(observed=True)
cfhtlsm = fitsio.read('stuff/cfhtlswide_match.fits')
m = np.where( (cfhtlsm['psfMag'][:,1]>20) &
(cfhtlsm['psfMag'][:,1]<30) )[0]
m = cfhtlsm[m]
for ccdNum in range(1,5):
ents = []
for ti,t in enumerate(tiles):
print ccdNum,ti,len(tiles)
ii = np.where( (m['g_tileId'] == t['tileId']) &
(m['g_ditherId'] == t['ditherId']) &
(m['g_ccdNum'] == ccdNum) &
(m['g_psfFlux'] != 0) )[0]
if len(ii)==0:
continue
impath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
t['fileName']+'_ccd%d_pv.fits'%ccdNum)
psfpath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
t['fileName']+'_ccd%d.ldac_cat.psf'%ccdNum)
if not os.path.exists(impath):
print ' ... %s does not exist, skipping' % impath
continue
gain = bok_gain_2015[ccdNum-1]
rdnoise = bok_rn_2015[ccdNum-1]
rmsADU,rmsEl,A,skyADU = bokdepth.calc_processed_image_rms(
impath,psfpath,
gain=gain, rdNoise=rdnoise,
retPars=True)
snr = m['g_psfFlux'][ii] / rmsADU
fwhm = 2*m['g_fluxRad']*1.1 * 0.455
skyADUps = skyADU / m['g_expTime'][ii]
nominal_snr = [ boketc.snr_singleexposure('g',m['psfMag'][i,1],
m['g_expTime'][i],
fwhm=fwhm[i],
skyADU=skyADUps[0],
profile='gaussian')
for i in ii ]
nominal_snr = np.array(nominal_snr)
# revise the ETC calculation using updated gain and RN values,
# as well as the noise-equivalent-gaussian determined from the
# pixel area of the PSF
NEG = np.sqrt(A/(4*np.pi)) * 0.455 * 2.355
revnominal_snr = [ boketc.snr_singleexposure('g',m['psfMag'][i,1],
m['g_expTime'][i],
fwhm=NEG,
skyADU=skyADUps[0],
profile='gaussian',
gain=gain,
rdnoise=rdnoise)
for i in ii ]
revnominal_snr = np.array(revnominal_snr)
objEl = m['g_psfFlux'][ii] * gain
est_snr = objEl / np.sqrt(objEl + rmsEl**2)
sex_snr = m['g_psfFlux'][ii] / m['g_psfFluxErr'][ii]
ents.extend( [ vals for vals in itertools.izip(ii,
m['psfMag'][ii,1],
[A]*len(ii),
skyADUps,fwhm,
snr,nominal_snr,
est_snr,sex_snr,
revnominal_snr) ] )
ents = np.array(ents,dtype=[('ii','i4'),('refMag','f4'),
('psfArea','f4'),('skyADUperSec','f4'),
('fwhm','f4'),
('snrRMS','f4'),('snrETC','f4'),
('snrSky','f4'),('snrSex','f4'),
('snrETCrev','f4')])
fitsio.write('cfhtlswide_snr.fits',ents,clobber=(ccdNum==1))
def plot_cfhtls_snr_ratio(snr1='snrRMS',snr2='snrETCrev'):
hdus = fitsio.FITS('cfhtlswide_snr.fits')
ccds = [hdu.read() for hdu in hdus[1:]]
plt.figure()
for pnum,ccd in enumerate(ccds,start=1):
ax = plt.subplot(2,2,pnum)
plt.hexbin(ccd['refMag'],ccd[snr1]/ccd[snr2],
extent=(20,23.5,0.5,1.5),cmap=plt.cm.Blues)
plt.axhline(1,c='r')
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.0))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.2))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.05))
ax.set_title('CCD%d'%pnum)
plt.figtext(0.01,0.5,'SNR/SNR(ETC)',va='center',rotation='vertical')
plt.figtext(0.5,0.01,'g mag (CFHTLS)',ha='center')
##############################################################################
# #
# Pan-STARRS Medium Deeps #
# #
##############################################################################
def match_ps1mds(matchRad=2.5):
raise NotImplementedError
pstiles = panstarrs_md_tiles(observed=True)
for field,tiles in pstiles.items():
stars = fitsio.read(ps1md_starfile(field))
matches = match_objects(stars,tiles)
fitsio.write('ps1%s_match.fits'%field,matches,clobber=True)
##############################################################################
# #
# fake sources #
# #
##############################################################################
from astropy.io import fits
def fake_sdss_stars_on_tile(stars,tile,
nresample=200,magrange=(22.0,23.4),
stampSize=25,margin=50,aper='psf',
keepfakes=False,savestars=False):
pixlo = lambda _x: _x-stampSize/2
pixhi = lambda _x: _x-stampSize/2 + stampSize
fakemags = np.zeros(nresample*4,dtype=np.float32)
fakesnr = -np.ones_like(fakemags)
if aper=='auto':
magk,fluxk,errk = 'MAG_AUTO','FLUX_AUTO','FLUXERR_AUTO'
elif aper=='psf':
magk,fluxk,errk = 'MAG_PSF','FLUX_PSF','FLUXERR_PSF'
else:
raise ValueError
for ccdNum in range(1,5):
catpath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
tile['fileName']+'_ccd%d.cat.fits'%ccdNum)
if not os.path.exists(catpath):
print ' ... %s does not exist, skipping' % catpath
continue
cat = fitsio.read(catpath)
impath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
tile['fileName']+'_ccd%d.fits'%ccdNum)
_impath = impath.replace('.fits','_pv.fits')
fakeim = fits.open(_impath)
im = fakeim[0].data
nY,nX = im.shape
ii = np.where( (stars['ra']>cat['ALPHA_J2000'].min()+3e-3) &
(stars['ra']<cat['ALPHA_J2000'].max()-3e-3) &
(stars['dec']>cat['DELTA_J2000'].min()+3e-3) &
(stars['dec']<cat['DELTA_J2000'].max()-3e-3) )[0]
if len(ii)==0:
print 'no stars found on ccd #',ccdNum
continue
m1,m2 = srcor(stars['ra'][ii],stars['dec'][ii],
cat['ALPHA_J2000'],cat['DELTA_J2000'],2.5)
jj = np.where(cat['FLAGS'][m2] == 0)[0]
rindx = np.random.choice(len(jj),size=nresample,replace=True)
fakemag = magrange[0] + \
(magrange[1]-magrange[0])*np.random.random(nresample)
fscale = 10**(-0.4*(fakemag-stars['psfMag_g'][ii[m1[jj[rindx]]]]))
print 'matched %d/%d stars, max scale factor %.2e' % \
(len(m1),len(ii),fscale.max())
fakex = np.random.randint(margin,nX-margin,nresample)
fakey = np.random.randint(margin,nY-margin,nresample)
for x,y,fx,fy,fscl in zip(np.round(cat['X_IMAGE'][m2[jj[rindx]]]),
np.round(cat['Y_IMAGE'][m2[jj[rindx]]]),
fakex,fakey,fscale):
stamp = im[pixlo(y):pixhi(y),pixlo(x):pixhi(x)]
im[pixlo(fy):pixhi(fy),pixlo(fx):pixhi(fx)] += fscl*stamp
fakeimpath = impath.replace('.fits','_fake.fits')
fakecatpath = fakeimpath.replace('.fits','.cat.fits')
fakeim.writeto(fakeimpath,clobber=True)
bokextract.sextract(fakeimpath,frompv=False,redo=True,
withpsf=True,redopsf=False,
psfpath=impath.replace('.fits','.ldac_cat.psf'))
fakecat = fitsio.read(fakecatpath)
q1,q2 = srcorXY(fakex,fakey,fakecat['X_IMAGE'],fakecat['Y_IMAGE'],3.0)
snr = fakecat[fluxk][q2] / fakecat[errk][q2]
fakemags[nresample*(ccdNum-1):nresample*ccdNum] = fakemag
fakesnr[nresample*(ccdNum-1):nresample*ccdNum][q1] = snr
if True:
zpt = np.median(cat[magk][m2[jj]] - stars['psfMag_g'][ii[m1[jj]]])
zpt -= 25
foo = np.where(fakemag[q1] < 22.3)[0]
offset = np.median((-2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt) - fakemag[q1[foo]])
print 'fake star mag offset is ',offset
fakemags[nresample*(ccdNum-1):nresample*ccdNum] += offset
if False:
print ' --------- ZERO POINT CHECK -----------'
print cat[magk][m2[jj]][:10]
print -2.5*np.log10(cat[fluxk][m2[jj]])[:10] - zpt
print stars['psfMag_g'][ii[m1]][:10]
print ( (-2.5*np.log10(cat[fluxk][m2[jj]])[:10] - zpt) -
stars['psfMag_g'][ii[m1]][:10])
print -2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt
print fakemag[q1[foo]]
print ( (-2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt) -
fakemag[q1[foo]] )
print ( (-2.5*np.log10(fakecat[fluxk][q2[foo]]) - zpt) -
fakemag[q1[foo]] ).mean()
print snr[foo]
print
if not keepfakes:
os.unlink(fakeimpath)
os.unlink(fakecatpath)
if savestars:
np.savetxt(fakeimpath.replace('.fits','_stars.dat'),
np.vstack([fakemag,fakex,fakey]).transpose(),fmt='%9.3f')
return fakemags,fakesnr
def fake_ndwfs_stars(grange=(16.0,17.0),**kwargs):
aper = kwargs.setdefault('aper','psf')
magrange = kwargs.setdefault('magrange',(22.0,23.4))
nbins = 5
medges = np.linspace(magrange[0],magrange[1],nbins+1)
np.random.seed(1)
stars = fitsio.read('/global/scratch2/sd/imcgreer/ndwfs/sdss_bootes_gstars.fits')
fakedir = '/global/scratch2/sd/imcgreer/fakes/'
stars = stars[(stars['psfMag_g']>grange[0])&(stars['psfMag_g']<grange[1])]
tiles = ndwfs_tiles(observed=True)
summaryf = open(fakedir+'fakestars_%s_bytile.dat' % aper,'w')
summaryf.write('# %4s %1s %8s ' % ('tile','D','utdate'))
for i in range(nbins):
summaryf.write('%6.3f ' % ((medges[i]+medges[i+1])/2))
summaryf.write('\n')
for ti,tile in enumerate(tiles):
print 'faking stars in tile %d/%d' % (ti+1,len(tiles))
mag,snr = fake_sdss_stars_on_tile(stars,tile,**kwargs)
np.savetxt(fakedir+'fakestars_%s_%05d_%d_%s.dat' %
(aper,tile['tileId'],tile['ditherId'],tile['utDate']),
np.vstack([mag,snr]).transpose(),fmt='%8.3f')
summaryf.write(' %05d %1d %8s ' %
(tile['tileId'],tile['ditherId'],tile['utDate']))
ii = np.digitize(mag,medges)
# could divide by CCD
for i in range(nbins):
jj = np.where(ii==i+1)[0]
frac = np.sum(snr[jj]>5.0) / float(len(jj))
summaryf.write('%6.3f ' % frac)
summaryf.write('\n')
summaryf.close()
def ndwfs_sdss_matches():
''' for checking linearity '''
import basslog
stars = fitsio.read('/global/scratch2/sd/imcgreer/ndwfs/sdss_bootes_gstars.fits')
logs = basslog.load_Bok_logs('./logs/')
tiles = ndwfs_tiles(observed=True)
tiledb = bass.load_tiledb()
tid = np.array([int(tid) for tid in tiledb['TID']])
i1 = 0
m = np.zeros(1e5,dtype=[('sdss_id','i4'),('sdss_g_mag','f4'),
('bass_g_mag','f4'),('bass_g_err','f4'),
('bass_expTime','f4'),('bass_skyADU','f4'),
('bass_airmass','f4'),('bass_ebv','f4'),
('bass_ccdNum','i4'),('bass_ditherId','i4'),
('bass_fluxMax','f4'),('bass_FWHM','f4')])
for ti,tile in enumerate(tiles):
print 'tile %d/%d [%d]' % (ti+1,len(tiles),i1)
for ccdNum in range(1,5):
impath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
tile['fileName']+'_ccd%d.fits'%ccdNum)
if not os.path.exists(impath):
print ' ... %s does not exist, skipping' % impath
continue
h = fitsio.read_header(impath)
sky = h['SKYVAL']
catpath = os.path.join(bass.rdxdir,tile['utDate'],'ccdproc3',
tile['fileName']+'_ccd%d.cat.fits'%ccdNum)
if not os.path.exists(catpath):
print ' ... %s does not exist, skipping' % catpath
continue
cat = fitsio.read(catpath)
ii = np.where( (stars['ra']>cat['ALPHA_J2000'].min()+3e-3) &
(stars['ra']<cat['ALPHA_J2000'].max()-3e-3) &
(stars['dec']>cat['DELTA_J2000'].min()+3e-3) &
(stars['dec']<cat['DELTA_J2000'].max()-3e-3) )[0]
if len(ii)==0:
print 'no stars found on ccd #',ccdNum
continue
m1,m2 = srcor(stars['ra'][ii],stars['dec'][ii],
cat['ALPHA_J2000'],cat['DELTA_J2000'],2.5)
jj = np.where(cat['FLAGS'][m2] == 0)[0]
i2 = i1 + len(jj)
m['sdss_id'][i1:i2] = ii[m1[jj]]
m['sdss_g_mag'][i1:i2] = stars['psfMag_g'][ii[m1[jj]]]
m['bass_g_mag'][i1:i2] = cat['MAG_PSF'][m2[jj]]
m['bass_g_err'][i1:i2] = cat['MAGERR_PSF'][m2[jj]]
m['bass_fluxMax'][i1:i2] = cat['FLUX_MAX'][m2[jj]]
m['bass_FWHM'][i1:i2] = np.median(cat['FWHM_IMAGE'][m2[jj]])
m['bass_expTime'][i1:i2] = tile['expTime']
i = np.where(logs[tile['utDate']]['fileName'] ==
tile['fileName'])[0][0]
m['bass_airmass'][i1:i2] = logs[tile['utDate']]['airmass'][i]
m['bass_ebv'][i1:i2] = tiledb['EBV'][tid==tile['tileId']][0]
m['bass_ccdNum'][i1:i2] = ccdNum
m['bass_ditherId'][i1:i2] = tile['ditherId']
m['bass_skyADU'][i1:i2] = sky
i1 = i2
m = m[:i1]
outdir = '/project/projectdirs/cosmo/staging/bok/ian/'
fitsio.write(outdir+'ndwfs_sdss.fits',m,clobber=True)
def get_phototiles_info():
import boklog
logs = boklog.load_Bok_logs('./logs/')
tiledb = bass.load_tiledb()
tid = np.array([int(tid) for tid in tiledb['TID']])
ccdNum = 1
photinfof = open('photo_tiles_info.txt','w')
photinfof.write('# %6s %10s %7s %7s %7s %10s %8s %7s\n' %
('UTD','file','airmass','E(B-V)','FWHMpix','skyADU','zpt','texp'))
for ti,tiles in enumerate([cfhtw3_tiles(),ndwfs_tiles()]):
if ti==0:
refcat = fitsio.read(cfhtlswide_starfile)
ii = np.where((refcat['psfMag'][:,1]>17) &
(refcat['psfMag'][:,1]<18.5))[0]
ref_ra = refcat['ra'][ii]
ref_dec = refcat['dec'][ii]
ref_mag = refcat['psfMag'][ii,1]
#ref_mag = refcat['psfMag'][ii,1] - A_ext['g']*refcat['E(B-V)'][ii]
else:
refcat = fitsio.read(bootes_sdss_starfile)
ii = np.where((refcat['psfMag_g']>16) &
(refcat['psfMag_g']<18.5))[0]
ref_ra = refcat['ra'][ii]
ref_dec = refcat['dec'][ii]
ref_mag = refcat['psfMag_g'][ii]
#ref_mag = refcat['psfMag_g'][ii] - refcat['extinction_g'][ii]
for tj,t in enumerate(tiles):
if t['ditherId'] != 1:
continue
# get E(B-V) from tile database
ebv = tiledb['EBV'][tid==t['tileId']][0]
# get conditions (airmass,exptime) from observing logs
try:
i = np.where(logs[t['utDate']]['fileName']==t['fileName'])[0][0]
except:
continue
airmass = logs[t['utDate']]['airmass'][i]
exptime = logs[t['utDate']]['expTime'][i]
# get sky value in ADU from FITS headers
impath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
t['fileName']+'_ccd%d.fits'%ccdNum)
h = fitsio.read_header(impath)
sky = h['SKYVAL']
# get FWHM and zero point from catalogs
catpath = os.path.join(bass.rdxdir,t['utDate'],'ccdproc3',
t['fileName']+'_ccd%d.cat.fits'%ccdNum)
cat = fitsio.read(catpath)
ii = np.where( (ref_ra>cat['ALPHA_J2000'].min()+3e-3) &
(ref_ra<cat['ALPHA_J2000'].max()-3e-3) &
(ref_dec>cat['DELTA_J2000'].min()+3e-3) &
(ref_dec<cat['DELTA_J2000'].max()-3e-3) )[0]
if len(ii)==0:
continue
m1,m2 = srcor(ref_ra[ii],ref_dec[ii],
cat['ALPHA_J2000'],cat['DELTA_J2000'],2)
if len(m1)==0:
continue
m1 = ii[m1]
ii = np.where(cat['FLAGS'][m2]==0)[0]
m1,m2 = m1[ii],m2[ii]
if len(m1)<5:
continue
print len(ii),' stars on tile ',t['utDate'],t['fileName']
fwhm = np.median(cat['FWHM_IMAGE'][m2])
zpt = 25 - np.median(cat['MAG_AUTO'][m2] - ref_mag[m1]) - \
2.5*np.log10(exptime)
photinfof.write('%8s %10s %7.2f %7.3f %7.2f %10.2f %8.3f %7.1f\n' %
(t['utDate'],t['fileName'],airmass,ebv,fwhm,sky,zpt,exptime))
photinfof.close()
def phototiles_stats(doplots=True):
import boketc
gain = boketc.G
pxscl = boketc.p
k = boketc.k_ext['g']
A = boketc.A_ext['g']
tiledat = ascii_io.read('photo_tiles_info.txt')
sky_ADUs = tiledat['skyADU'] / tiledat['texp']
sky_eps = sky_ADUs * gain
sky_magasec2 = -2.5*np.log10(sky_ADUs*pxscl**-2) + tiledat['zpt']
print sky_ADUs.mean(),sky_eps.mean(),sky_magasec2.mean()
zp0 = tiledat['zpt'] - k*(tiledat['airmass']-1) #- A*tiledat['E(B-V)']
print zp0.mean()
fwhm_asec = tiledat['FWHMpix'] * pxscl
if doplots:
fig = plt.figure(figsize=(8,6))
ax1 = plt.subplot(2,2,1)
ax1.hist(zp0)
#ax1.axvline(boketc.bok_zpt0_am00['g'],c='r',lw=2)
ax1.axvline(boketc.bok_zpt0_am10['g'],c='r',lw=2)
ax1 = plt.subplot(2,2,2)
ax1.hist(sky_magasec2)
ax1.axvline(boketc.kpno_sky_lun0['g'],c='r',lw=2)
ax1 = plt.subplot(2,2,3)
ax1.hist(fwhm_asec)
ax1.axvline(boketc.bok_medianFWHM['g'],c='r',lw=2)
if __name__=='__main__':
import sys
if sys.argv[1]=='match_ndwfs':
match_ndwfs_stars()
elif sys.argv[1]=='match_cfhtlswide':
match_cfhtls_stars(survey='wide')
elif sys.argv[1]=='fake_ndwfs':
if len(sys.argv)==2 or 'psf' in sys.argv[2:]:
aper = 'psf'
elif 'auto' in sys.argv[2:]:
aper = 'auto'
fake_ndwfs_stars(aper=aper)
elif sys.argv[1]=='photo_info':
get_phototiles_info()
else:
raise ValueError
| bsd-3-clause |
mne-tools/mne-python | mne/viz/tests/test_epochs.py | 2 | 18317 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
import os.path as op
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, Epochs, pick_types, read_cov, create_info,
EpochsArray)
from mne.channels import read_layout
from mne.fixes import _close_event
from mne.io import read_raw_fif, read_raw_ctf
from mne.utils import _click_ch_name
from mne.viz import plot_drop_log
from mne.viz.utils import _fake_click
from mne.datasets import testing
from mne.event import make_fixed_length_events
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 1.0
layout = read_layout('Vectorview-all')
test_base_dir = testing.data_path(download=False)
ctf_fname = op.join(test_base_dir, 'CTF', 'testdata_ctf.ds')
def _get_epochs(stop=5, meg=True, eeg=False, n_chan=20):
"""Get epochs."""
raw = read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=meg, eeg=eeg, stim=False,
ecg=False, eog=False, exclude='bads')
# Use a subset of channels for plotting speed
picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
with pytest.warns(RuntimeWarning, match='projection'):
epochs = Epochs(raw, events[:stop], event_id, tmin, tmax, picks=picks,
proj=False, preload=False)
epochs.info.normalize_proj() # avoid warnings
return epochs
@pytest.fixture()
def epochs():
"""Get minimal, pre-loaded epochs data suitable for most tests."""
return _get_epochs().load_data()
def test_plot_epochs_not_preloaded():
"""Test plotting non-preloaded epochs."""
epochs = _get_epochs()
assert epochs._data is None
epochs.plot()
assert epochs._data is None
def test_plot_epochs_basic(epochs, capsys):
"""Test epoch plotting."""
assert len(epochs.events) == 1
epochs.info['lowpass'] = 10. # allow heavy decim during plotting
fig = epochs.plot(scalings=None, title='Epochs')
ticks = [x.get_text() for x in fig.mne.ax_main.get_xticklabels(minor=True)]
assert ticks == ['2']
plt.close('all')
# covariance / whitening
cov = read_cov(cov_fname)
assert len(cov['names']) == 366 # all channels
assert cov['bads'] == []
assert epochs.info['bads'] == [] # all good
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# add a channel to the epochs.info['bads']
epochs.info['bads'] = [epochs.ch_names[0]]
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# add a channel to cov['bads']
cov['bads'] = [epochs.ch_names[1]]
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# have a data channel missing from the covariance
cov['names'] = cov['names'][:306]
cov['data'] = cov['data'][:306, :306]
with pytest.warns(RuntimeWarning, match='projection'):
epochs.plot(noise_cov=cov)
plt.close('all')
# other options
fig = epochs[0].plot(picks=[0, 2, 3], scalings=None)
fig.canvas.key_press_event('escape')
with pytest.raises(ValueError, match='No appropriate channels found'):
epochs.plot(picks=[])
# gh-5906
epochs = _get_epochs(None).load_data()
epochs.load_data()
assert len(epochs) == 7
epochs.info['bads'] = [epochs.ch_names[0]]
capsys.readouterr()
# test title error handling
with pytest.raises(TypeError, match='title must be None or a string, got'):
epochs.plot(title=7)
# test auto-generated title, and selection mode
epochs.plot(group_by='selection', title='')
@pytest.mark.parametrize('scalings', (dict(mag=1e-12, grad=1e-11, stim='auto'),
None, 'auto'))
def test_plot_epochs_scalings(epochs, scalings):
"""Test the valid options for scalings."""
epochs.plot(scalings=scalings)
def test_plot_epochs_colors(epochs):
"""Test epoch_colors, for compatibility with autoreject."""
epoch_colors = [['r'] * len(epochs.ch_names) for _ in
range(len(epochs.events))]
epochs.plot(epoch_colors=epoch_colors)
with pytest.raises(ValueError, match='length equal to the number of epo'):
epochs.plot(epoch_colors=[['r'], ['b']]) # epochs obj has only 1 epoch
with pytest.raises(ValueError, match=r'epoch colors for epoch \d+ has'):
epochs.plot(epoch_colors=[['r']]) # need 1 color for each channel
# also test event_color
epochs.plot(event_color='b')
def test_plot_epochs_scale_bar(epochs):
"""Test scale bar for epochs."""
fig = epochs.plot()
fig.canvas.key_press_event('s') # default is to not show scalebars
ax = fig.mne.ax_main
# only empty vline-text, mag & grad in this instance
assert len(ax.texts) == 3
texts = tuple(t.get_text().strip() for t in ax.texts)
wants = ('', '800.0 fT/cm', '2000.0 fT')
assert texts == wants
def test_plot_epochs_clicks(epochs, capsys):
"""Test plot_epochs mouse interaction."""
fig = epochs.plot(events=epochs.events)
data_ax = fig.mne.ax_main
x = fig.mne.traces[0].get_xdata()[3]
y = fig.mne.traces[0].get_ydata()[3]
n_epochs = len(epochs)
epoch_num = fig.mne.inst.selection[0]
# test (un)marking bad epochs
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad epoch
assert epoch_num in fig.mne.bad_epochs
_fake_click(fig, data_ax, [x, y], xform='data') # unmark it
assert epoch_num not in fig.mne.bad_epochs
_fake_click(fig, data_ax, [x, y], xform='data') # mark it bad again
assert epoch_num in fig.mne.bad_epochs
# test vline
fig.canvas.key_press_event('escape') # close and drop epochs
_close_event(fig) # XXX workaround, MPL Agg doesn't trigger close event
assert(n_epochs - 1 == len(epochs))
# test marking bad channels
epochs = _get_epochs(None).load_data() # need more than 1 epoch this time
fig = epochs.plot(n_epochs=3)
data_ax = fig.mne.ax_main
first_ch = data_ax.get_yticklabels()[0].get_text()
assert first_ch not in fig.mne.info['bads']
_click_ch_name(fig, ch_index=0, button=1) # click ch name to mark bad
assert first_ch in fig.mne.info['bads']
# test clicking scrollbars
_fake_click(fig, fig.mne.ax_vscroll, [0.5, 0.5])
_fake_click(fig, fig.mne.ax_hscroll, [0.5, 0.5])
# test moving bad epoch offscreen
fig.canvas.key_press_event('right') # move right
x = fig.mne.traces[0].get_xdata()[-3]
y = fig.mne.traces[0].get_ydata()[-3]
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad epoch
fig.canvas.key_press_event('left') # move back
out, err = capsys.readouterr()
assert 'out of bounds' not in out
assert 'out of bounds' not in err
fig.canvas.key_press_event('escape')
_close_event(fig) # XXX workaround, MPL Agg doesn't trigger close event
assert len(epochs) == 6
# test rightclick → image plot
fig = epochs.plot()
_click_ch_name(fig, ch_index=0, button=3) # show image plot
assert len(fig.mne.child_figs) == 1
# test scroll wheel
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
def test_plot_epochs_keypresses():
"""Test plot_epochs keypress interaction."""
epochs = _get_epochs(stop=15).load_data() # we need more than 1 epoch
epochs.drop_bad(dict(mag=4e-12)) # for histogram plot coverage
fig = epochs.plot(n_epochs=3)
data_ax = fig.mne.ax_main
# make sure green vlines are visible first (for coverage)
sample_idx = len(epochs.times) // 2 # halfway through the first epoch
x = fig.mne.traces[0].get_xdata()[sample_idx]
y = (fig.mne.traces[0].get_ydata()[sample_idx]
+ fig.mne.traces[1].get_ydata()[sample_idx]) / 2
_fake_click(fig, data_ax, [x, y], xform='data') # click between traces
# test keys
keys = ('pagedown', 'down', 'up', 'down', 'right', 'left', '-', '+', '=',
'd', 'd', 'pageup', 'home', 'shift+right', 'end', 'shift+left',
'z', 'z', 's', 's', 'f11', '?', 'h', 'j', 'b')
for key in keys * 2: # test twice → once in normal, once in butterfly view
fig.canvas.key_press_event(key)
_fake_click(fig, data_ax, [x, y], xform='data', button=3) # remove vlines
def test_plot_overlapping_epochs_with_events():
"""Test drawing of event lines in overlapping epochs."""
data = np.zeros(shape=(3, 2, 100)) # 3 epochs, 2 channels, 100 samples
sfreq = 100
info = create_info(
ch_names=('a', 'b'), ch_types=('misc', 'misc'), sfreq=sfreq)
# 90% overlap, so all 3 events should appear in all 3 epochs when plotted:
events = np.column_stack(([50, 60, 70], [0, 0, 0], [1, 2, 3]))
epochs = EpochsArray(data, info, tmin=-0.5, events=events)
fig = epochs.plot(events=events, picks='misc')
assert len(fig.mne.event_lines.get_segments()) == 9
def test_epochs_plot_sensors(epochs):
"""Test sensor plotting."""
epochs.plot_sensors()
def test_plot_epochs_nodata():
"""Test plotting of epochs when no data channels are present."""
data = np.random.RandomState(0).randn(10, 2, 1000)
info = create_info(2, 1000., 'stim')
epochs = EpochsArray(data, info)
with pytest.raises(ValueError, match='consider passing picks explicitly'):
epochs.plot()
def test_plot_epochs_image(epochs):
"""Test plotting of epochs image.
Note that some of these tests that should pass are triggering MPL
UserWarnings about tight_layout not being applied ("tight_layout cannot
make axes width small enough to accommodate all axes decorations"). Calling
`plt.close('all')` just before the offending test seems to prevent this
warning, though it's unclear why.
"""
figs = epochs.plot_image()
assert len(figs) == 2 # one fig per ch_type (test data has mag, grad)
assert len(plt.get_fignums()) == 2
figs = epochs.plot_image()
assert len(figs) == 2
assert len(plt.get_fignums()) == 4 # should create new figures
epochs.plot_image(picks='mag', sigma=0.1)
epochs.plot_image(picks=[0, 1], combine='mean',
ts_args=dict(show_sensors=False))
epochs.plot_image(picks=[1], order=[0], overlay_times=[0.1], vmin=0.01,
title='test')
plt.close('all')
epochs.plot_image(picks=[1], overlay_times=[0.1], vmin=-0.001, vmax=0.001)
plt.close('all')
epochs.plot_image(picks=[1], vmin=lambda x: x.min())
# test providing figure
fig, axs = plt.subplots(3, 1)
epochs.plot_image(picks=[1], fig=fig)
# test providing axes instance
epochs.plot_image(picks=[1], axes=axs[0], evoked=False, colorbar=False)
plt.close('all')
# test order=callable
epochs.plot_image(picks=[0, 1],
order=lambda times, data: np.arange(len(data))[::-1])
# test warning
with pytest.warns(RuntimeWarning, match='Only one channel in group'):
epochs.plot_image(picks=[1], combine='mean')
# group_by should be a dict
with pytest.raises(TypeError, match="dict or None"):
epochs.plot_image(group_by='foo')
# units and scalings keys must match
with pytest.raises(ValueError, match='Scalings and units must have the'):
epochs.plot_image(units=dict(hi=1), scalings=dict(ho=1))
plt.close('all')
# test invert_y
epochs.plot_image(ts_args=dict(invert_y=True))
# can't combine different sensor types
with pytest.raises(ValueError, match='Cannot combine sensors of differ'):
epochs.plot_image(group_by=dict(foo=[0, 1, 2]))
# can't pass both fig and axes
with pytest.raises(ValueError, match='one of "fig" or "axes" must be'):
epochs.plot_image(fig='foo', axes='bar')
# wrong number of axes in fig
with pytest.raises(ValueError, match='"fig" must contain . axes, got .'):
epochs.plot_image(fig=plt.figure())
# only 1 group allowed when fig is passed
with pytest.raises(ValueError, match='"group_by" can only have one group'):
fig, axs = plt.subplots(3, 1)
epochs.plot_image(fig=fig, group_by=dict(foo=[0, 1], bar=[5, 6]))
del fig, axs
plt.close('all')
# must pass correct number of axes (1, 2, or 3)
with pytest.raises(ValueError, match='is a list, can only plot one group'):
fig, axs = plt.subplots(1, 3)
epochs.plot_image(axes=axs)
for length, kwargs in ([3, dict()],
[2, dict(evoked=False)],
[2, dict(colorbar=False)],
[1, dict(evoked=False, colorbar=False)]):
fig, axs = plt.subplots(1, length + 1)
epochs.plot_image(picks='mag', axes=axs[:length], **kwargs)
with pytest.raises(ValueError, match='"axes" must be length ., got .'):
epochs.plot_image(picks='mag', axes=axs, **kwargs)
plt.close('all')
# mismatch between axes dict keys and group_by dict keys
with pytest.raises(ValueError, match='must match the keys in "group_by"'):
epochs.plot_image(axes=dict())
# wrong number of axes in dict
match = 'each value in "axes" must be a list of . axes, got .'
with pytest.raises(ValueError, match=match):
epochs.plot_image(axes=dict(foo=axs[:2], bar=axs[:3]),
group_by=dict(foo=[0, 1], bar=[5, 6]))
# bad value of "combine"
with pytest.raises(ValueError, match='"combine" must be None, a callable'):
epochs.plot_image(combine='foo')
# mismatched picks and overlay_times
with pytest.raises(ValueError, match='size of overlay_times parameter'):
epochs.plot_image(picks=[1], overlay_times=[0.1, 0.2])
# bad overlay times
with pytest.warns(RuntimeWarning, match='fall outside'):
epochs.plot_image(overlay_times=[999.])
# mismatched picks and order
with pytest.raises(ValueError, match='must match the length of the data'):
epochs.plot_image(picks=[1], order=[0, 1])
plt.close('all')
def test_plot_drop_log():
"""Test plotting a drop log."""
epochs = _get_epochs() # not loaded
with pytest.raises(ValueError, match='bad epochs have not yet been'):
epochs.plot_drop_log()
epochs.drop_bad()
epochs.plot_drop_log()
plot_drop_log((('One',), (), ()))
plot_drop_log((('One',), ('Two',), ()))
plot_drop_log((('One',), ('One', 'Two'), ()))
for arg in ([], ([],), (1,)):
with pytest.raises(TypeError, match='tuple of tuple of str'):
plot_drop_log(arg)
plt.close('all')
def test_plot_psd_epochs(epochs):
"""Test plotting epochs psd (+topomap)."""
epochs.plot_psd(average=True, spatial_colors=False)
epochs.plot_psd(average=False, spatial_colors=True)
epochs.plot_psd(average=False, spatial_colors=False)
# test plot_psd_topomap errors
with pytest.raises(RuntimeError, match='No frequencies in band'):
epochs.plot_psd_topomap(bands=[(0, 0.01, 'foo')])
plt.close('all')
# test defaults
fig = epochs.plot_psd_topomap()
assert len(fig.axes) == 10 # default: 5 bands (δ, θ, α, β, γ) + colorbars
# test joint vlim
fig = epochs.plot_psd_topomap(vlim='joint')
vmin_0 = fig.axes[0].images[0].norm.vmin
vmax_0 = fig.axes[0].images[0].norm.vmax
assert all(vmin_0 == ax.images[0].norm.vmin for ax in fig.axes[1:5])
assert all(vmax_0 == ax.images[0].norm.vmax for ax in fig.axes[1:5])
# test support for single-bin bands
fig = epochs.plot_psd_topomap(bands=[(20, '20 Hz'), (15, 25, '15-25 Hz')])
# test with a flat channel
err_str = 'for channel %s' % epochs.ch_names[2]
epochs.get_data()[0, 2, :] = 0
for dB in [True, False]:
with pytest.warns(UserWarning, match=err_str):
epochs.plot_psd(dB=dB)
def test_plot_psdtopo_nirs(fnirs_epochs):
"""Test plotting of PSD topography for nirs data."""
bands = [(0.2, '0.2 Hz'), (0.4, '0.4 Hz'), (0.8, '0.8 Hz')]
fig = fnirs_epochs.plot_psd_topomap(bands=bands)
assert len(fig.axes) == 6 # 3 band x (plot + cmap)
@testing.requires_testing_data
def test_plot_epochs_ctf():
"""Test of basic CTF plotting."""
raw = read_raw_ctf(ctf_fname, preload=True)
raw.pick_channels(['UDIO001', 'UPPT001', 'SCLK01-177',
'BG1-4304', 'MLC11-4304', 'MLC11-4304',
'EEG058', 'UADC007-4302'])
evts = make_fixed_length_events(raw)
epochs = Epochs(raw, evts, preload=True)
epochs.plot()
plt.close('all')
# test butterfly
fig = epochs.plot(butterfly=True)
keys = ('b', 'b', 'pagedown', 'down', 'up', 'down', 'right', 'left', '-',
'+', '=', 'd', 'd', 'pageup', 'home', 'end', 'z', 'z', 's', 's',
'f11', '?', 'h', 'j')
for key in keys:
fig.canvas.key_press_event(key)
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
fig.canvas.resize_event()
fig.canvas.key_press_event('escape') # close and drop epochs
@testing.requires_testing_data
def test_plot_psd_epochs_ctf():
"""Test plotting CTF epochs psd (+topomap)."""
raw = read_raw_ctf(ctf_fname, preload=True)
evts = make_fixed_length_events(raw)
epochs = Epochs(raw, evts, preload=True)
pytest.raises(RuntimeError, epochs.plot_psd_topomap,
bands=[(0, 0.01, 'foo')]) # no freqs in range
epochs.plot_psd_topomap()
# EEG060 is flat in this dataset
for dB in [True, False]:
with pytest.warns(UserWarning, match='for channel EEG060'):
epochs.plot_psd(dB=dB)
epochs.drop_channels(['EEG060'])
epochs.plot_psd(spatial_colors=False, average=False)
| bsd-3-clause |
fabioticconi/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (a local optimum), with the estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy, so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
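# Illustrative helper (not used by the example itself): the robustness
# measure described in the module docstring, assuming `inertias` holds the
# final inertia values of several independent fits of the same model.
def relative_inertia_std(inertias):
    """Return std(inertia) / mean(inertia), i.e. the relative spread."""
    inertias = np.asarray(inertias, dtype=float)
    return inertias.std() / inertias.mean()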
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/smooth histogram method/11.py | 1 | 1764 | """
MOST OF THIS CODE IS NOT USED
IT'S COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import swhlab.common as cm
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def analyzeSweep(abf,label=None):
Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:]
#Y=abf.sweepY[abf.pointsPerSec*.5:]
AV,SD=np.average(Y),np.std(Y)
dev=5 # number of stdevs from the avg to set the range
R1,R2=[(AV-SD)*dev,(AV+SD)*dev]
nBins=1000
hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True)
histSmooth=abf.convolve(hist,cm.kernel_gaussian(nBins/5))
peakI=np.where(histSmooth==max(histSmooth))[0][0]
# center the peak at 0 pA
hist=np.roll(hist,int(nBins/2-peakI))
histSmooth=np.roll(histSmooth,int(nBins/2-peakI))
# normalize height to 1
hist,histSmooth=hist/max(histSmooth),histSmooth/max(histSmooth)
plt.plot(histSmooth,label=label,lw=3,alpha=.5)
if __name__=="__main__":
#abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf"
abf=swhlab.ABF(abfFile)
abf.kernel=abf.kernel_gaussian(sizeMS=500) # kernel for smart baseline
plt.figure(figsize=(10,10))
# for sweep in range(abf.sweeps):
for sweep in [175,200,375]:
abf.setsweep(sweep)
analyzeSweep(abf,label=str(sweep))
print("Sweep",sweep)
plt.legend()
plt.show()
print("DONE")
| mit |
belltailjp/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
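# Illustrative aside (not called below): the density-estimation idea from the
# module docstring in miniature -- fit a OneClassSVM on standardized,
# positive-only training vectors and score query points. `train` and `query`
# are assumed to be 2-D arrays of environmental features.
def one_class_density_sketch(train, query, nu=0.1, gamma=0.5):
    mean, std = train.mean(axis=0), train.std(axis=0)
    clf = svm.OneClassSVM(nu=nu, kernel="rbf", gamma=gamma)
    clf.fit((train - mean) / std)
    return clf.decision_function((query - mean) / std)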
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
bnaul/scikit-learn | examples/classification/plot_classifier_comparison.py | 34 | 5239 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
kdmurray91/scikit-bio | skbio/io/format/ordination.py | 7 | 14424 | r"""
Ordination results format (:mod:`skbio.io.format.ordination`)
=============================================================
.. currentmodule:: skbio.io.format.ordination
The ordination results file format (``ordination``) stores the results of an
ordination method in a human-readable, text-based format. The format supports
storing the results of various ordination methods available in scikit-bio,
including (but not necessarily limited to) PCoA, CA, RDA, and CCA.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.ordination.OrdinationResults` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The format is text-based, consisting of six attributes that describe the
ordination results:
- ``Eigvals``: 1-D
- ``Proportion explained``: 1-D
- ``Species``: 2-D
- ``Site``: 2-D
- ``Biplot``: 2-D
- ``Site constraints``: 2-D
The attributes in the file *must* be in this order.
Each attribute is defined in its own section of the file, where sections are
separated by a blank (or whitespace-only) line. Each attribute begins with a
header line, which contains the attribute's name (as listed above), followed by
a tab character, followed by one or more tab-separated dimensions (integers)
that describe the shape of the attribute's data.
The attribute's data follows its header line, and is stored in tab-separated
format. ``Species``, ``Site``, and ``Site constraints`` store species and site
IDs, respectively, as the first column, followed by the 2-D data array.
An example of this file format might look like::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>3<tab>3
-0.16<tab>0.63<tab>0.76
-0.99<tab>0.06<tab>-0.04
0.18<tab>-0.97<tab>0.03
Site constraints<tab>10<tab>4
Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24
Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69
Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11
Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66
Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61
Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28
Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42
Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00
Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17
Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28
If a given result attribute is not present (e.g. ``Biplot``), it should still
be defined and declare its dimensions as 0. For example::
Biplot<tab>0<tab>0
All attributes are optional except for ``Eigvals``.
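For example, the output of a method that only produced eigenvalues could be
stored by declaring every other attribute with zero dimensions (each section
still separated from the next by a blank line)::
Eigvals<tab>2
0.75<tab>0.25
Proportion explained<tab>0
Species<tab>0<tab>0
Site<tab>0<tab>0
Biplot<tab>0<tab>0
Site constraints<tab>0<tab>0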
Examples
--------
Assume we have the following tab-delimited text file storing the
ordination results in ``ordination`` format::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>0<tab>0
Site constraints<tab>0<tab>0
Load the ordination results from the file:
>>> from io import StringIO
>>> from skbio import OrdinationResults
>>> or_f = StringIO(
... "Eigvals\t4\n"
... "0.36\t0.18\t0.07\t0.08\n"
... "\n"
... "Proportion explained\t4\n"
... "0.46\t0.23\t0.10\t0.10\n"
... "\n"
... "Species\t9\t4\n"
... "Species0\t0.11\t0.28\t-0.20\t-0.00\n"
... "Species1\t0.14\t0.30\t0.39\t-0.14\n"
... "Species2\t-1.01\t0.09\t-0.19\t-0.10\n"
... "Species3\t-1.03\t0.10\t0.22\t0.22\n"
... "Species4\t1.05\t0.53\t-0.43\t0.22\n"
... "Species5\t0.99\t0.57\t0.67\t-0.38\n"
... "Species6\t0.25\t-0.17\t-0.20\t0.43\n"
... "Species7\t0.14\t-0.85\t-0.01\t0.05\n"
... "Species8\t0.41\t-0.70\t0.21\t-0.69\n"
... "\n"
... "Site\t10\t4\n"
... "Site0\t0.71\t-3.08\t0.21\t-1.24\n"
... "Site1\t0.58\t-3.00\t-0.94\t2.69\n"
... "Site2\t0.76\t-3.15\t2.13\t-3.11\n"
... "Site3\t1.11\t1.07\t-1.87\t0.66\n"
... "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n"
... "Site5\t1.04\t0.45\t-0.63\t0.28\n"
... "Site6\t-0.95\t-0.08\t0.13\t-0.42\n"
... "Site7\t0.94\t-0.10\t0.52\t-0.00\n"
... "Site8\t-1.14\t0.49\t0.47\t1.17\n"
... "Site9\t1.03\t1.03\t2.74\t-1.28\n"
... "\n"
... "Biplot\t0\t0\n"
... "\n"
... "Site constraints\t0\t0\n")
>>> ord_res = OrdinationResults.read(or_f)
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from skbio.stats.ordination import OrdinationResults
from skbio.io import create_format, OrdinationFormatError
ordination = create_format('ordination')
@ordination.sniffer()
def _ordination_sniffer(fh):
# Smells an ordination file if *all* of the following lines are present
# *from the beginning* of the file:
# - eigvals header (minimally parsed)
# - another line (contents ignored)
# - a whitespace-only line
# - proportion explained header (minimally parsed)
try:
_parse_header(fh, 'Eigvals', 1)
next_line = next(fh, None)
if next_line is not None:
_check_empty_line(fh)
_parse_header(fh, 'Proportion explained', 1)
return True, {}
except OrdinationFormatError:
pass
return False, {}
@ordination.reader(OrdinationResults)
def _ordination_to_ordination_results(fh):
eigvals = _parse_vector_section(fh, 'Eigvals')
if eigvals is None:
raise OrdinationFormatError("At least one eigval must be present.")
_check_empty_line(fh)
prop_expl = _parse_vector_section(fh, 'Proportion explained')
_check_length_against_eigvals(prop_expl, eigvals,
'proportion explained values')
_check_empty_line(fh)
species = _parse_array_section(fh, 'Species')
_check_length_against_eigvals(species, eigvals,
'coordinates per species')
_check_empty_line(fh)
site = _parse_array_section(fh, 'Site')
_check_length_against_eigvals(site, eigvals,
'coordinates per site')
_check_empty_line(fh)
# biplot does not have ids to parse (the other arrays do)
biplot = _parse_array_section(fh, 'Biplot', has_ids=False)
_check_empty_line(fh)
cons = _parse_array_section(fh, 'Site constraints')
if cons is not None and site is not None:
if not np.array_equal(cons.index, site.index):
raise OrdinationFormatError(
"Site constraints ids and site ids must be equal: %s != %s" %
(cons.index, site.index))
return OrdinationResults(
short_method_name='', long_method_name='', eigvals=eigvals,
features=species, samples=site, biplot_scores=biplot,
sample_constraints=cons, proportion_explained=prop_expl)
def _parse_header(fh, header_id, num_dimensions):
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for %s header." % header_id)
header = line.strip().split('\t')
# +1 for the header ID
if len(header) != num_dimensions + 1 or header[0] != header_id:
raise OrdinationFormatError("%s header not found." % header_id)
return header
def _check_empty_line(fh):
"""Check that the next line in `fh` is empty or whitespace-only."""
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for blank line separating "
"sections.")
if line.strip():
raise OrdinationFormatError("Expected an empty line.")
def _check_length_against_eigvals(data, eigvals, label):
if data is not None:
num_vals = data.shape[-1]
num_eigvals = eigvals.shape[-1]
if num_vals != num_eigvals:
raise OrdinationFormatError(
"There should be as many %s as eigvals: %d != %d" %
(label, num_vals, num_eigvals))
def _parse_vector_section(fh, header_id):
header = _parse_header(fh, header_id, 1)
# Parse how many values we are waiting for
num_vals = int(header[1])
if num_vals == 0:
# The ordination method didn't generate the vector, so set it to None
vals = None
else:
# Parse the line with the vector values
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for line containing values "
"for %s section." % header_id)
vals = pd.Series(np.asarray(line.strip().split('\t'),
dtype=np.float64))
if len(vals) != num_vals:
raise OrdinationFormatError(
"Expected %d values in %s section, but found %d." %
(num_vals, header_id, len(vals)))
return vals
def _parse_array_section(fh, header_id, has_ids=True):
"""Parse an array section of `fh` identified by `header_id`."""
# Parse the array header
header = _parse_header(fh, header_id, 2)
# Parse the dimensions of the array
rows = int(header[1])
cols = int(header[2])
ids = None
if rows == 0 and cols == 0:
# The ordination method didn't generate the array data for 'header', so
# set it to None
data = None
elif rows == 0 or cols == 0:
# Both dimensions should be 0 or none of them are zero
raise OrdinationFormatError("One dimension of %s is 0: %d x %d" %
(header_id, rows, cols))
else:
# Parse the data
data = np.empty((rows, cols), dtype=np.float64)
if has_ids:
ids = []
for i in range(rows):
# Parse the next row of data
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for row %d in %s "
"section." % (i + 1, header_id))
vals = line.strip().split('\t')
if has_ids:
ids.append(vals[0])
vals = vals[1:]
if len(vals) != cols:
raise OrdinationFormatError(
"Expected %d values, but found %d in row %d." %
(cols, len(vals), i + 1))
data[i, :] = np.asarray(vals, dtype=np.float64)
data = pd.DataFrame(data, index=ids)
return data
@ordination.writer(OrdinationResults)
def _ordination_results_to_ordination(obj, fh):
_write_vector_section(fh, 'Eigvals', obj.eigvals)
_write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
_write_array_section(fh, 'Species', obj.features)
_write_array_section(fh, 'Site', obj.samples)
_write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False)
_write_array_section(fh, 'Site constraints', obj.sample_constraints,
include_section_separator=False)
def _write_vector_section(fh, header_id, vector):
if vector is None:
shape = 0
else:
shape = vector.shape[0]
fh.write("%s\t%d\n" % (header_id, shape))
if vector is not None:
fh.write(_format_vector(vector.values))
fh.write("\n")
def _write_array_section(fh, header_id, data, has_ids=True,
include_section_separator=True):
# write section header
if data is None:
shape = (0, 0)
else:
shape = data.shape
fh.write("%s\t%d\t%d\n" % (header_id, shape[0], shape[1]))
# write section data
if data is not None:
if not has_ids:
for vals in data.values:
fh.write(_format_vector(vals))
else:
for id_, vals in zip(data.index, data.values):
fh.write(_format_vector(vals, id_))
if include_section_separator:
fh.write("\n")
def _format_vector(vector, id_=None):
formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
if id_ is None:
return "%s\n" % formatted_vector
else:
return "%s\t%s\n" % (id_, formatted_vector)
| bsd-3-clause |
bsipocz/scikit-image | skimage/viewer/plugins/color_histogram.py | 40 | 3271 | import numpy as np
import matplotlib.pyplot as plt
from ... import color, exposure
from .plotplugin import PlotPlugin
from ..canvastools import RectangleTool
class ColorHistogram(PlotPlugin):
name = 'Color Histogram'
def __init__(self, max_pct=0.99, **kwargs):
super(ColorHistogram, self).__init__(height=400, **kwargs)
self.max_pct = max_pct
print(self.help())
def attach(self, image_viewer):
super(ColorHistogram, self).attach(image_viewer)
self.rect_tool = RectangleTool(self,
on_release=self.ab_selected)
self._on_new_image(image_viewer.image)
def _on_new_image(self, image):
self.lab_image = color.rgb2lab(image)
# Calculate color histogram in the Lab colorspace:
L, a, b = self.lab_image.T
left, right = -100, 100
ab_extents = [left, right, right, left]
self.mask = np.ones(L.shape, bool)
bins = np.arange(left, right)
hist, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(),
bins, normed=True)
self.data = {'bins': bins, 'hist': hist, 'edges': (x_edges, y_edges),
'extents': (left, right, left, right)}
# Clip bin heights that dominate a-b histogram
max_val = pct_total_area(hist, percentile=self.max_pct)
hist = exposure.rescale_intensity(hist, in_range=(0, max_val))
self.ax.imshow(hist, extent=ab_extents, cmap=plt.cm.gray)
self.ax.set_title('Color Histogram')
self.ax.set_xlabel('b')
self.ax.set_ylabel('a')
def help(self):
helpstr = ("Color Histogram tool:",
"Select region of a-b colorspace to highlight on image.")
return '\n'.join(helpstr)
def ab_selected(self, extents):
x0, x1, y0, y1 = extents
self.data['extents'] = extents
lab_masked = self.lab_image.copy()
L, a, b = lab_masked.T
self.mask = ((a > y0) & (a < y1)) & ((b > x0) & (b < x1))
lab_masked[..., 1:][~self.mask.T] = 0
self.image_viewer.image = color.lab2rgb(lab_masked)
def output(self):
"""Return the image mask and the histogram data.
Returns
-------
mask : array of bool, same shape as image
The selected pixels.
data : dict
The data describing the histogram and the selected region.
The dictionary contains:
- 'bins' : array of float
The bin boundaries for both `a` and `b` channels.
- 'hist' : 2D array of float
The normalized histogram.
- 'edges' : tuple of array of float
The bin edges along each dimension
- 'extents' : tuple of float
The left and right and top and bottom of the selected region.
"""
return (self.mask, self.data)
def pct_total_area(image, percentile=0.80):
"""Return threshold value based on percentage of total area.
The returned value is the intensity below which the specified fraction of
the image's pixels fall.
"""
idx = int((image.size - 1) * percentile)
sorted_pixels = np.sort(image.flat)
return sorted_pixels[idx]
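# Example (illustrative): in ColorHistogram above, `pct_total_area(hist, 0.99)`
# returns the histogram height below which 99% of the bin values fall; taller
# bins are clipped by `rescale_intensity` before the a-b histogram is shown.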
| bsd-3-clause |
classicboyir/BuildingMachineLearningSystemsWithPython | ch05/utils.py | 24 | 7111 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
try:
import ujson as json # UltraJSON if available
except:
import json
from matplotlib import pylab
import numpy as np
from data import CHART_DIR
def fetch_data(filename, col=None, line_count=-1, only_questions=False):
count = 0
for line in open(filename, "r"):
count += 1
if line_count > 0 and count > line_count:
break
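# unpack the 11 tab-separated fields; `data` keeps the full list so that
# single columns can also be retrieved by index below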
data = Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction = line.split(
"\t")
IsQuestion = int(IsQuestion)
if only_questions and not IsQuestion:
continue
if col:
if col < 6:
val = int(data[col])
else:
val = data[col]
yield val
else:
Id = int(Id)
assert Id >= 0, line
ParentId = int(ParentId)
IsAccepted = int(IsAccepted)
assert not IsQuestion == IsAccepted == 1, "%i %i --- %s" % (
IsQuestion, IsAccepted, line)
assert (ParentId == -1 and IsQuestion) or (
ParentId >= 0 and not IsQuestion), "%i %i --- %s" % (ParentId, IsQuestion, line)
TimeToAnswer = int(TimeToAnswer)
Score = int(Score)
NumTextTokens = int(NumTextTokens)
NumCodeLines = int(NumCodeLines)
LinkCount = int(LinkCount)
MisSpelledFraction = float(MisSpelledFraction)
yield Id, ParentId, IsQuestion, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, MisSpelledFraction
def fetch_posts(filename, with_index=True, line_count=-1):
count = 0
for line in open(filename, "r"):
count += 1
if line_count > 0 and count > line_count:
break
Id, Text = line.split("\t")
Text = Text.strip()
if with_index:
yield int(Id), Text
else:
yield Text
def load_meta(filename):
meta = json.load(open(filename, "r"))
keys = list(meta.keys())
# JSON only allows string keys, changing that to int
for key in keys:
meta[int(key)] = meta[key]
del meta[key]
# post Id to index in vectorized
id_to_idx = {}
# and back
idx_to_id = {}
for PostId, Info in meta.items():
id_to_idx[PostId] = idx = Info['idx']
idx_to_id[idx] = PostId
return meta, id_to_idx, idx_to_id
def plot_roc(auc_score, name, fpr, tpr):
pylab.figure(num=None, figsize=(6, 5))
pylab.plot([0, 1], [0, 1], 'k--')
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('Receiver operating characteristic (AUC=%0.2f)\n%s' % (
auc_score, name))
pylab.legend(loc="lower right")
pylab.grid(True, linestyle='-', color='0.75')
pylab.fill_between(tpr, fpr, alpha=0.5)
pylab.plot(fpr, tpr, lw=1)
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + name.replace(" ", "_") + ".png"))
def plot_pr(auc_score, name, precision, recall, label=None):
pylab.figure(num=None, figsize=(6, 5))
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
pylab.fill_between(recall, precision, alpha=0.5)
pylab.grid(True, linestyle='-', color='0.75')
pylab.plot(recall, precision, lw=1)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, "pr_" + filename + ".png"))
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
top = list(zip(c_f[:n], c_f[:-(n + 1):-1]))
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_feat_importance(feature_names, clf, name):
pylab.figure(num=None, figsize=(6, 5))
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(list(range(len(coef))))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
if len(data_name_list) > 1:
assert filename is not None
pylab.figure(num=None, figsize=(8, 6))
num_rows = int(1 + (len(data_name_list) - 1) / 2)
num_cols = int(1 if len(data_name_list) == 1 else 2)
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Fraction')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name, title):
pylab.figure(num=None, figsize=(6, 5))
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, test_errors, "--", data_sizes, train_errors, "b-", lw=1)
pylab.legend(["test error", "train error"], loc="upper right")
pylab.grid(True, linestyle='-', color='0.75')
pylab.savefig(
os.path.join(CHART_DIR, "bv_" + name.replace(" ", "_") + ".png"), bbox_inches="tight")
def plot_k_complexity(ks, train_errors, test_errors):
pylab.figure(num=None, figsize=(6, 5))
pylab.ylim([0.0, 1.0])
pylab.xlabel('k')
pylab.ylabel('Error')
pylab.title('Errors for for different values of $k$')
pylab.plot(
ks, test_errors, "--", ks, train_errors, "-", lw=1)
pylab.legend(["test error", "train error"], loc="upper right")
pylab.grid(True, linestyle='-', color='0.75')
pylab.savefig(
os.path.join(CHART_DIR, "kcomplexity.png"), bbox_inches="tight")
| mit |
olologin/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 14 | 2001 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
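# One possible way to complete the TASK blocks above (illustrative only --
# the exercise intends for you to write your own version):
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3),
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)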
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
xclxxl414/rqalpha | test.py | 1 | 8227 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import sys
from datetime import datetime
import os
import csv
from six import iteritems
import pandas as pd
import numpy as np
import coverage
from rqalpha import run, run_func
from rqalpha.utils.logger import system_log
TEST_DIR = os.path.abspath("./tests/")
TEST_OUT = os.path.abspath("./tests/outs/")
pd.set_option("display.width", 160)
def run_tests(file_path=None):
if file_path is not None:
files = [file_path]
else:
files = [f for f in os.listdir(TEST_DIR) if f.find("test") == 0]
error_map = {}
for filename in files:
try:
r, result_data = run_test(filename)
if r is not None:
error_map[filename.replace(".py", "")] = result_data
except Exception as e:
system_log.exception()
error_map[filename.replace(".py", "")] = e
for filename, result_data in iteritems(error_map):
print(u"*" * 20, u"[{}]did not pass!".format(filename), u"*" * 20)
if isinstance(result_data, Exception):
system_log.error(result_data)
else:
df, old_df, result = result_data
# print("+" * 10, "old test Dataframe: ", "+" * 10)
# print(old_df.drop(result.columns[result.all()], axis=1))
# print("+" * 10, "new test Dataframe: ", "+" * 10)
# print(df.drop(result.columns[result.all()], axis=1))
print(result.all())
print(u"=" * 40)
print(u"[{}|{}] strategies has been passed!".format(len(files) - len(error_map), len(files)))
return len(error_map)
def run_test(filename):
config = {
"base": {
"strategy_file": os.path.join(TEST_DIR, filename)
}
}
print(u"Start test: " + str(config["base"]["strategy_file"]))
result_dict = run(config)['sys_analyser']
df = result_dict["portfolio"]
# del df['positions']
old_pickle_file = os.path.join(TEST_OUT, filename.replace(".py", ".pkl"))
if not os.path.exists(old_pickle_file):
if not os.path.exists(TEST_OUT):
os.makedirs(TEST_OUT)
pickle.dump(result_dict, open(old_pickle_file, "wb"), protocol=2)
return None, None
else:
old_result_dict = pd.read_pickle(old_pickle_file)
        # compare portfolios
old_df = old_result_dict["portfolio"]
old_df = old_df.fillna(0)
old_df = old_df.replace([np.inf, -np.inf], 0)
df = df.fillna(0)
df = df.replace([np.inf, -np.inf], 0)
# del old_df["trades"]
# del df["trades"]
try:
del old_df["dividend_receivable"]
del df["dividend_receivable"]
        except KeyError:  # the column may be absent in some result pickles
pass
df = df.round(0)
old_df = old_df.round(0)
result = df.eq(old_df)
if not result.all().all():
return result.all(), (df, old_df, result)
        # compare summary
old_df = pd.DataFrame(data=[{"val": val} for val in old_result_dict["summary"].values()],
index=old_result_dict["summary"].keys()).sort_index().T.fillna(0)
df = pd.DataFrame(data=[{"val": val} for val in result_dict["summary"].values()],
index=result_dict["summary"].keys()).sort_index().T.fillna(0)
try:
del old_df['daily_pnl']
del old_df['daily_returns']
del old_df['dividend_receivable']
del old_df['strategy_file']
del df['strategy_file']
        except KeyError:
pass
try:
del old_df['strategy_file']
del df['strategy_file']
        except KeyError:
pass
result = df.eq(old_df)
if not result.all().all():
return result.all(), (old_result_dict, result_dict, result)
return None, None
def is_enable_coverage():
return os.environ.get('COVERAGE') == "enabled"
def test_api(specific_test=None):
# FIXME: Error msg is hard to understand @zjuguxi
print(u"Testing API......")
from tests.api import test_strategies as test_api_strategies
from tests.mod import test_strategies as test_mod_strategies
for strategy in test_api_strategies + test_mod_strategies:
if specific_test and strategy["name"] != specific_test:
continue
print("running", strategy["name"])
run_func(**strategy)
print(u"API test ends.")
def test_strategy():
run_tests()
def write_csv(path, fields):
old_test_times = []
if not os.path.exists(path):
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fields)
writer.writeheader()
with open(path) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
old_test_times.append(row)
    # NOTE: performance_path, time_spend and end_time are module-level globals
    # assigned in the __main__ block below.
    if performance_path is not None:
if 0 < len(old_test_times) < 5 and time_spend > float(sum(float(i['time_spend']) for i in old_test_times)) / len(old_test_times) * 1.1:
print('Average time of last 5 runs:', float(sum(float(i['time_spend']) for i in old_test_times))/len(old_test_times))
print('Now time spend:', time_spend)
raise RuntimeError('Performance regresses!')
elif len(old_test_times) >= 5 and time_spend > float(sum(float(i['time_spend']) for i in old_test_times[-5:])) / 5 * 1.1:
print('Average time of last 5 runs:',
float(sum(float(i['time_spend']) for i in old_test_times[-5:])) / 5)
print('Now time spend:', time_spend)
raise RuntimeError('Performance regresses!')
else:
with open(path, 'a') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fields)
writer.writerow({'date_time': end_time, 'time_spend': time_spend})
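# The CSV written above is a simple two-column log, for example:
#   date_time,time_spend
#   2017-01-01 12:00:00.000000,42.5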
def run_unit_tests():
from unittest import TextTestRunner
from tests.unittest import load_tests
TextTestRunner(verbosity=2).run(load_tests())
if __name__ == '__main__':
if is_enable_coverage():
print("enable coverage")
cov = coverage.Coverage()
cov.start()
performance_path = None
field_names = ['date_time', 'time_spend']
start_time = datetime.now()
if len(sys.argv) >= 2:
if sys.argv[1] == 'api':
try:
test_api(sys.argv[2])
except IndexError:
test_api()
end_time = datetime.now()
elif sys.argv[1] == 'strategy':
test_strategy()
end_time = datetime.now()
elif sys.argv[1] == 'performance':
# test_api()
test_strategy()
end_time = datetime.now()
performance_path = sys.argv[2]
time_spend = (end_time - start_time).total_seconds()
write_csv(performance_path, field_names)
elif sys.argv[1] == 'unittest':
run_unit_tests()
end_time = datetime.now()
else:
target_file = sys.argv[1]
run_tests(target_file)
end_time = datetime.now()
else:
run_unit_tests()
test_api()
error_count = run_tests()
end_time = datetime.now()
if error_count == 0:
time_csv_file_path = os.path.join(TEST_OUT, "time.csv")
time_spend = (end_time - start_time).total_seconds()
write_csv(time_csv_file_path, field_names)
else:
print('Failed!')
sys.exit(-1)
if is_enable_coverage():
cov.stop()
cov.save()
cov.html_report()
print("Total Spend: ", end_time - start_time)
| apache-2.0 |
vibhorag/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
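# For example (only the first two version components are compared):
#   cmp_version('0.17', '0.9')  -> 1
#   cmp_version('1.0', '1.0.1') -> 0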
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
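# MyPassiveAggressive above is a compact reference implementation of the
# passive-aggressive update rules (PA-I for the "hinge"/"epsilon_insensitive"
# losses, PA-II for the "squared_*" losses, cf. Crammer et al., 2006); the
# tests below compare it against the scikit-learn estimators.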
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
mikecroucher/GPy | GPy/plotting/gpy_plot/plot_util.py | 4 | 15308 | #===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from scipy import sparse
import itertools
from ...models import WarpedGP
def in_ipynb():
try:
cfg = get_ipython().config
return 'IPKernelApp' in cfg
except NameError:
return False
def find_best_layout_for_subplots(num_subplots):
r, c = 1, 1
while (r*c) < num_subplots:
if (c==(r+1)) or (r==c):
c += 1
elif c==(r+2):
r += 1
c -= 1
return r, c
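# For example, find_best_layout_for_subplots(5) returns (2, 3) and
# find_best_layout_for_subplots(7) returns (2, 4).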
def helper_predict_with_model(self, Xgrid, plot_raw, apply_link, percentiles, which_data_ycols, predict_kw, samples=0):
"""
Make the right decisions for prediction with a model
based on the standard arguments of plotting.
This is quite complex and will take a while to understand,
so do not change anything in here lightly!!!
"""
# Put some standards into the predict_kw so that prediction is done automatically:
if predict_kw is None:
predict_kw = {}
if 'likelihood' not in predict_kw:
if plot_raw:
from ...likelihoods import Gaussian
from ...likelihoods.link_functions import Identity
lik = Gaussian(Identity(), 1e-9) # Make the likelihood not add any noise
else:
lik = None
predict_kw['likelihood'] = lik
if 'Y_metadata' not in predict_kw:
predict_kw['Y_metadata'] = {}
if 'output_index' not in predict_kw['Y_metadata']:
predict_kw['Y_metadata']['output_index'] = Xgrid[:,-1:].astype(np.int)
mu, _ = self.predict(Xgrid, **predict_kw)
if percentiles is not None:
percentiles = self.predict_quantiles(Xgrid, quantiles=percentiles, **predict_kw)
else: percentiles = []
if samples > 0:
fsamples = self.posterior_samples(Xgrid, full_cov=True, size=samples, **predict_kw)
fsamples = fsamples[which_data_ycols] if fsamples.ndim == 3 else fsamples
else:
fsamples = None
# Filter out the ycolums which we want to plot:
retmu = mu[:, which_data_ycols]
percs = [p[:, which_data_ycols] for p in percentiles]
if plot_raw and apply_link:
for i in range(len(which_data_ycols)):
retmu[:, [i]] = self.likelihood.gp_link.transf(mu[:, [i]])
for perc in percs:
perc[:, [i]] = self.likelihood.gp_link.transf(perc[:, [i]])
if fsamples is not None and fsamples.ndim == 3:
for s in range(fsamples.shape[-1]):
fsamples[i, :, s] = self.likelihood.gp_link.transf(fsamples[i, :, s])
elif fsamples is not None:
for s in range(fsamples.shape[-1]):
fsamples[:, s] = self.likelihood.gp_link.transf(fsamples[:, s])
return retmu, percs, fsamples
def helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution):
"""
Figure out the data, free_dims and create an Xgrid for
the prediction.
This is only implemented for two dimensions for now!
"""
#work out what the inputs are for plotting (1D or 2D)
if fixed_inputs is None:
fixed_inputs = []
fixed_dims = get_fixed_dims(fixed_inputs)
free_dims = get_free_dims(self, visible_dims, fixed_dims)
if len(free_dims) == 1:
#define the frame on which to plot
resolution = resolution or 200
Xnew, xmin, xmax = x_frame1D(X[:,free_dims], plot_limits=plot_limits, resolution=resolution)
Xgrid = np.zeros((Xnew.shape[0],self.input_dim))
Xgrid[:,free_dims] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
x = Xgrid
y = None
elif len(free_dims) == 2:
#define the frame for plotting on
resolution = resolution or 35
Xnew, x, y, xmin, xmax = x_frame2D(X[:,free_dims], plot_limits, resolution)
Xgrid = np.zeros((Xnew.shape[0], self.input_dim))
Xgrid[:,free_dims] = Xnew
#xmin = Xgrid.min(0)[free_dims]
#xmax = Xgrid.max(0)[free_dims]
for i,v in fixed_inputs:
Xgrid[:,i] = v
else:
raise TypeError("calculated free_dims {} from visible_dims {} and fixed_dims {} is neither 1D nor 2D".format(free_dims, visible_dims, fixed_dims))
return fixed_dims, free_dims, Xgrid, x, y, xmin, xmax, resolution
def scatter_label_generator(labels, X, visible_dims, marker=None):
ulabels = []
for lab in labels:
if not lab in ulabels:
ulabels.append(lab)
if marker is not None:
marker = itertools.cycle(list(marker))
else:
m = None
try:
input_1, input_2, input_3 = visible_dims
except:
try:
# tuple or int?
input_1, input_2 = visible_dims
input_3 = None
except:
input_1 = visible_dims
input_2 = input_3 = None
for ul in ulabels:
from numbers import Number
if isinstance(ul, str):
try:
this_label = unicode(ul)
except NameError:
#python3
this_label = ul
elif isinstance(ul, Number):
this_label = 'class {!s}'.format(ul)
else:
this_label = ul
if marker is not None:
m = next(marker)
index = np.nonzero(labels == ul)[0]
if input_2 is None:
x = X[index, input_1]
y = np.zeros(index.size)
z = None
elif input_3 is None:
x = X[index, input_1]
y = X[index, input_2]
z = None
else:
x = X[index, input_1]
y = X[index, input_2]
z = X[index, input_3]
yield x, y, z, this_label, index, m
def subsample_X(X, labels, num_samples=1000):
"""
Stratified subsampling if labels are given.
    Note that due to rounding you may see small differences between
    num_samples and the number of samples actually returned.
"""
if X.shape[0] > num_samples:
print("Warning: subsampling X, as it has more samples then {}. X.shape={!s}".format(int(num_samples), X.shape))
if labels is not None:
subsample = []
for _, _, _, _, index, _ in scatter_label_generator(labels, X, (0, None, None)):
subsample.append(np.random.choice(index, size=max(2, int(index.size*(float(num_samples)/X.shape[0]))), replace=False))
subsample = np.hstack(subsample)
else:
            subsample = np.random.choice(X.shape[0], size=num_samples, replace=False)
        X = X[subsample]
        if labels is not None:  # labels may be None when no class information is given
            labels = labels[subsample]
#=======================================================================
# <<<WORK IN PROGRESS>>>
# <<<DO NOT DELETE>>>
# plt.close('all')
# fig, ax = plt.subplots(1,1)
# from GPy.plotting.matplot_dep.dim_reduction_plots import most_significant_input_dimensions
# import matplotlib.patches as mpatches
# i1, i2 = most_significant_input_dimensions(m, None)
# xmin, xmax = 100, -100
# ymin, ymax = 100, -100
# legend_handles = []
#
# X = m.X.mean[:, [i1, i2]]
# X = m.X.variance[:, [i1, i2]]
#
# xmin = X[:,0].min(); xmax = X[:,0].max()
# ymin = X[:,1].min(); ymax = X[:,1].max()
# range_ = [[xmin, xmax], [ymin, ymax]]
# ul = np.unique(labels)
#
# for i, l in enumerate(ul):
# #cdict = dict(red =[(0., colors[i][0], colors[i][0]), (1., colors[i][0], colors[i][0])],
# # green=[(0., colors[i][0], colors[i][1]), (1., colors[i][1], colors[i][1])],
# # blue =[(0., colors[i][0], colors[i][2]), (1., colors[i][2], colors[i][2])],
# # alpha=[(0., 0., .0), (.5, .5, .5), (1., .5, .5)])
# #cmap = LinearSegmentedColormap('{}'.format(l), cdict)
# cmap = LinearSegmentedColormap.from_list('cmap_{}'.format(str(l)), [colors[i], colors[i]], 255)
# cmap._init()
# #alphas = .5*(1+scipy.special.erf(np.linspace(-2,2, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# alphas = (scipy.special.erf(np.linspace(0,2.4, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# cmap._lut[:, -1] = alphas
# print l
# x, y = X[labels==l].T
#
# heatmap, xedges, yedges = np.histogram2d(x, y, bins=300, range=range_)
# #heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
#
# im = ax.imshow(heatmap, extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], cmap=cmap, aspect='auto', interpolation='nearest', label=str(l))
# legend_handles.append(mpatches.Patch(color=colors[i], label=l))
# ax.set_xlim(xmin, xmax)
# ax.set_ylim(ymin, ymax)
# plt.legend(legend_handles, [l.get_label() for l in legend_handles])
# plt.draw()
# plt.show()
#=======================================================================
return X, labels
def update_not_existing_kwargs(to_update, update_from):
"""
    This function updates the keyword arguments from update_from in
    to_update, but only for keys that are not already set in to_update.
    This is used to update kwargs from the default dicts.
"""
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
return to_update
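# For example:
#   update_not_existing_kwargs({'color': 'r'}, {'color': 'b', 'lw': 2})
#   returns {'color': 'r', 'lw': 2}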
def get_x_y_var(model):
"""
    Return the data of a model as
    X the inputs,
    X_variance the variance of the inputs (default: None)
    and Y the outputs.
    If (X, X_variance, Y) is given, this just returns it unchanged.
:returns: (X, X_variance, Y)
"""
# model given
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values
X_variance = model.X.variance.values
else:
try:
X = model.X.values
except AttributeError:
X = model.X
X_variance = None
try:
Y = model.Y.values
except AttributeError:
Y = model.Y
if isinstance(model, WarpedGP) and not model.predict_in_warped_space:
Y = model.Y_normalized
if sparse.issparse(Y): Y = Y.todense().view(np.ndarray)
return X, X_variance, Y
def get_free_dims(model, visible_dims, fixed_dims):
"""
    Work out which input dimensions to use for plotting (1D or 2D).
    The visible_dims are the dimensions that should be shown,
    the fixed_dims are the dimensions that are held fixed,
    and the free_dims are the visible dims without the fixed dims.
"""
if visible_dims is None:
visible_dims = np.arange(model.input_dim)
dims = np.asanyarray(visible_dims)
if fixed_dims is not None:
dims = [dim for dim in dims if dim not in fixed_dims]
return np.asanyarray([dim for dim in dims if dim is not None])
def get_fixed_dims(fixed_inputs):
"""
Work out the fixed dimensions from the fixed_inputs list of tuples.
"""
return np.array([i for i,_ in fixed_inputs])
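# For example, get_fixed_dims([(0, 1.5), (2, 0.)]) returns array([0, 2]).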
def get_which_data_ycols(model, which_data_ycols):
"""
Helper to get the data columns to plot.
"""
if which_data_ycols == 'all' or which_data_ycols is None:
return np.arange(model.output_dim)
return which_data_ycols
def get_which_data_rows(model, which_data_rows):
"""
Helper to get the data rows to plot.
"""
if which_data_rows == 'all' or which_data_rows is None:
return slice(None)
return which_data_rows
def x_frame1D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] ==1, "x_frame1D is defined for one-dimensional inputs"
if plot_limits is None:
from GPy.core.parameterization.variational import VariationalPosterior
if isinstance(X, VariationalPosterior):
xmin,xmax = X.mean.min(0),X.mean.max(0)
else:
xmin,xmax = X.min(0),X.max(0)
xmin, xmax = xmin-0.25*(xmax-xmin), xmax+0.25*(xmax-xmin)
elif len(plot_limits) == 2:
xmin, xmax = map(np.atleast_1d, plot_limits)
else:
raise ValueError("Bad limits for plotting")
Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None]
return Xnew, xmin, xmax
def x_frame2D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1]==2, "x_frame2D is defined for two-dimensional inputs"
if plot_limits is None:
xmin, xmax = X.min(0), X.max(0)
xmin, xmax = xmin-0.075*(xmax-xmin), xmax+0.075*(xmax-xmin)
elif len(plot_limits) == 2:
xmin, xmax = plot_limits
try:
xmin = xmin[0], xmin[1]
except:
# only one limit given, copy over to other lim
xmin = [plot_limits[0], plot_limits[0]]
xmax = [plot_limits[1], plot_limits[1]]
elif len(plot_limits) == 4:
xmin, xmax = (plot_limits[0], plot_limits[2]), (plot_limits[1], plot_limits[3])
else:
raise ValueError("Bad limits for plotting")
resolution = resolution or 50
xx, yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
Xnew = np.c_[xx.flat, yy.flat]
return Xnew, xx, yy, xmin, xmax
| bsd-3-clause |
MatthieuBizien/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 50 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
nsteinme/phy | phy/plot/tests/test_ccg.py | 2 | 1770 | # -*- coding: utf-8 -*-
"""Test CCG plotting."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from pytest import mark
import numpy as np
from ..ccg import _plot_ccg_mpl, CorrelogramView, plot_correlograms
from ...utils._color import _random_color
from ...io.mock import artificial_correlograms
from ...utils.testing import show_test
# Skip these tests in "make test-quick".
pytestmark = mark.long()
#------------------------------------------------------------------------------
# Tests matplotlib
#------------------------------------------------------------------------------
def test_plot_ccg():
n_bins = 51
ccg = np.random.randint(size=n_bins, low=10, high=50)
_plot_ccg_mpl(ccg, baseline=20, color='g')
def test_plot_correlograms():
n_bins = 51
ccg = np.random.uniform(size=(3, 3, n_bins))
c = plot_correlograms(ccg, lines=[-10, 0, 20], show=False)
show_test(c)
#------------------------------------------------------------------------------
# Tests VisPy
#------------------------------------------------------------------------------
def _test_correlograms(n_clusters=None):
n_samples = 51
correlograms = artificial_correlograms(n_clusters, n_samples)
c = CorrelogramView(keys='interactive')
c.cluster_ids = np.arange(n_clusters)
c.visual.correlograms = correlograms
c.visual.cluster_colors = np.array([_random_color()
for _ in range(n_clusters)])
c.lines = [-5, 0, 5]
show_test(c)
def test_correlograms_empty():
_test_correlograms(n_clusters=0)
def test_correlograms_full():
_test_correlograms(n_clusters=3)
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e308.py | 2 | 5382 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs = True,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
updates_func=momentum,
learning_rate=1e-05,
learning_rate_changes_by_iteration={
500: 5e-06
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
plotter=MDNPlotter
)
def exp_a(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
lail3344/sms-tools | lectures/08-Sound-transformations/plots-code/hps-transformation.py | 24 | 3018 | # function call to the transformation functions of relevance for the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
inputFile='../../../sounds/sax-phrase-short.wav'
window='blackman'
M=601
N=1024
t=-100
minSineDur=0.1
nH=100
minf0=350
maxf0=700
f0et=5
harmDevSlope=0.01
stocf=0.1
Ns = 512
H = 128
(fs, x) = UF.wavread(inputFile)
w = get_window(window, M)
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
timeScaling = np.array([0, 0, 2.138, 2.138-1.5, 3.146, 3.146])
yhfreq, yhmag, ystocEnv = HPST.hpsTimeScale(hfreq, hmag, mYst, timeScaling)
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
UF.wavwrite(y,fs, 'hps-transformation.wav')
plt.figure(figsize=(12, 9))
maxplotfreq = 14900.0
# plot the input sound
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.title('x (sax-phrase-short.wav)')
# plot spectrogram of the stochastic component
plt.subplot(4,1,2)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + stochastic residual')
# plot spectrogram of the transformed stochastic component
plt.subplot(4,1,3)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
harms = yhfreq*np.less(yhfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('timescaled harmonics + stochastic residual')
# plot the output sound
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.title('output sound: y')
plt.tight_layout()
plt.savefig('hps-transformation.png')
plt.show()
| agpl-3.0 |
bollu/vispy | vispy/color/colormap.py | 13 | 38233 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import inspect
import numpy as np
from .color_array import ColorArray
from ..ext.six import string_types
from ..ext.cubehelix import cubehelix
from ..ext.husl import husl_to_rgb
###############################################################################
# Color maps
# Utility functions for interpolation in NumPy.
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _find_controls(x, controls=None, clip=None):
x_controls = np.clip(np.searchsorted(controls, x) - 1, 0, clip)
return x_controls.astype(np.int32)
# Normalization
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
# Interpolation functions in NumPy.
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
def _interpolate_multi(colors, x, controls):
x = x.ravel()
n = len(colors)
# For each element in x, the control index of its bin's left boundary.
x_step = _find_controls(x, controls, n-2)
# The length of each bin.
controls_length = np.diff(controls).astype(np.float32)
# Prevent division by zero error.
controls_length[controls_length == 0.] = 1.
# Like x, but relative to each bin.
_to_clip = x - controls[x_step]
_to_clip /= controls_length[x_step]
x_rel = np.clip(_to_clip, 0., 1.)
return (colors[x_step],
colors[x_step + 1],
x_rel[:, None])
def mix(colors, x, controls=None):
a, b, x_rel = _interpolate_multi(colors, x, controls)
return _mix_simple(a, b, x_rel)
def smoothstep(edge0, edge1, x):
""" performs smooth Hermite interpolation
between 0 and 1 when edge0 < x < edge1. """
# Scale, bias and saturate x to 0..1 range
x = np.clip((x - edge0)/(edge1 - edge0), 0.0, 1.0)
# Evaluate polynomial
return x*x*(3 - 2*x)
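# For example, smoothstep(0., 1., 0.5) == 0.5 and
# smoothstep(0., 1., np.array([0., 0.25, 1.])) -> array([0., 0.15625, 1.]).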
def step(colors, x, controls=None):
    """Step interpolation from a set of colors. x belongs in [0, 1]."""
    x = x.ravel()
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(colors)
assert ncolors == len(controls) - 1
assert ncolors >= 2
x_step = _find_controls(x, controls, ncolors-1)
return colors[x_step, ...]
# GLSL interpolation functions.
def _glsl_mix(controls=None):
"""Generate a GLSL template function from a given interpolation patterns
and control points."""
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls)
assert ncolors >= 2
if ncolors == 2:
s = " return mix($color_0, $color_1, t);\n"
else:
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
adj_t = '(t - %s) / %s' % (controls[i],
controls[i+1] - controls[i])
s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
(ifs, i, i+1, adj_t))
return "vec4 colormap(float t) {\n%s\n}" % s
def _glsl_step(controls=None):
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls) - 1
assert ncolors >= 2
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
s += """%s {\n return $color_%d;\n} """ % (ifs, i)
return """vec4 colormap(float t) {\n%s\n}""" % s
# Mini GLSL template system for colors.
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
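# For example, with colors == [(0., 0., 0., 1.), (1., 1., 1., 1.)] the template
# "mix($color_0, $color_1, t)" becomes
# "mix(vec4(0.000, 0.000, 0.000, 1.000), vec4(1.000, 1.000, 1.000, 1.000), t)".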
class BaseColormap(object):
"""Class representing a colormap:
t \in [0, 1] --> rgba_color
Parameters
----------
colors : list of lists, tuples, or ndarrays
The control colors used by the colormap (shape = (ncolors, 4)).
Notes
-----
    Must be overridden. Child classes need to implement:
glsl_map : string
The GLSL function for the colormap. Use $color_0 to refer
to the first color in `colors`, and so on. These are vec4 vectors.
map(item) : function
Takes a (N, 1) vector of values in [0, 1], and returns a rgba array
of size (N, 4).
"""
# Control colors used by the colormap.
colors = None
# GLSL string with a function implementing the color map.
glsl_map = None
def __init__(self, colors=None):
# Ensure the colors are arrays.
if colors is not None:
self.colors = colors
if not isinstance(self.colors, ColorArray):
self.colors = ColorArray(self.colors)
        # Process the GLSL map function by replacing each $color_i by the
        # corresponding color.
if len(self.colors) > 0:
self.glsl_map = _process_glsl_template(self.glsl_map,
self.colors.rgba)
def map(self, item):
"""Return a rgba array for the requested items.
        This function must be overridden by child classes.
This function doesn't need to implement argument checking on `item`.
It can always assume that `item` is a (N, 1) array of values between
0 and 1.
Parameters
----------
item : ndarray
An array of values in [0,1].
Returns
-------
rgba : ndarray
An array with rgba values, with one color per item. The shape
should be ``item.shape + (4,)``.
Notes
-----
Users are expected to use a colormap with ``__getitem__()`` rather
than ``map()`` (which implements a lower-level API).
"""
raise NotImplementedError()
def __getitem__(self, item):
if isinstance(item, tuple):
raise ValueError('ColorArray indexing is only allowed along '
'the first dimension.')
# Ensure item is either a scalar or a column vector.
item = _vector(item, type='column')
# Clip the values in [0, 1].
item = np.clip(item, 0., 1.)
colors = self.map(item)
return ColorArray(colors)
def __setitem__(self, item, value):
raise RuntimeError("It is not possible to set items to "
"BaseColormap instances.")
def _repr_html_(self):
n = 100
html = ("""
<style>
table.vispy_colormap {
height: 30px;
border: 0;
margin: 0;
padding: 0;
}
table.vispy_colormap td {
width: 3px;
border: 0;
margin: 0;
padding: 0;
}
</style>
<table class="vispy_colormap">
""" +
'\n'.join([(("""<td style="background-color: %s;"
title="%s"></td>""") % (color, color))
for color in self[np.linspace(0., 1., n)].hex]) +
"""
</table>
""")
return html
def _default_controls(ncolors):
"""Generate linearly spaced control points from a set of colors."""
return np.linspace(0., 1., ncolors)
# List the parameters of every supported interpolation mode.
_interpolation_info = {
'linear': {
'ncontrols': lambda ncolors: ncolors, # take ncolors as argument
'glsl_map': _glsl_mix, # take 'controls' as argument
'map': mix,
},
'zero': {
'ncontrols': lambda ncolors: (ncolors+1),
'glsl_map': _glsl_step,
'map': step,
}
}
class Colormap(BaseColormap):
"""A colormap defining several control colors and an interpolation scheme.
Parameters
----------
colors : list of colors | ColorArray
The list of control colors. If not a ``ColorArray``, a new
``ColorArray`` instance is created from this list. See the
documentation of ``ColorArray``.
controls : array-like
The list of control points for the given colors. It should be
an increasing list of floating-point number between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Examples
--------
Here is a basic example:
>>> from vispy.color import Colormap
>>> cm = Colormap(['r', 'g', 'b'])
>>> cm[0.], cm[0.5], cm[np.linspace(0., 1., 100)]
"""
def __init__(self, colors, controls=None, interpolation='linear'):
self.interpolation = interpolation
ncontrols = self._ncontrols(len(colors))
# Default controls.
if controls is None:
controls = _default_controls(ncontrols)
assert len(controls) == ncontrols
self._controls = np.array(controls, dtype=np.float32)
self.glsl_map = self._glsl_map_generator(self._controls)
super(Colormap, self).__init__(colors)
@property
def interpolation(self):
"""The interpolation mode of the colormap"""
return self._interpolation
@interpolation.setter
def interpolation(self, val):
if val not in _interpolation_info:
raise ValueError('The interpolation mode can only be one of: ' +
', '.join(sorted(_interpolation_info.keys())))
# Get the information of the interpolation mode.
info = _interpolation_info[val]
# Get the function that generates the GLSL map, as a function of the
# controls array.
self._glsl_map_generator = info['glsl_map']
# Number of controls as a function of the number of colors.
self._ncontrols = info['ncontrols']
# Python map function.
self._map_function = info['map']
self._interpolation = val
def map(self, x):
"""The Python mapping function from the [0,1] interval to a
list of rgba colors
Parameters
----------
x : array-like
The values to map.
Returns
-------
colors : list
List of rgba colors.
"""
return self._map_function(self.colors.rgba, x, self._controls)
class CubeHelixColormap(Colormap):
def __init__(self, start=0.5, rot=1, gamma=1.0, reverse=True, nlev=32,
minSat=1.2, maxSat=1.2, minLight=0., maxLight=1., **kwargs):
"""Cube helix colormap
A full implementation of Dave Green's "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
D.A. Green, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Parameters
----------
start : scalar, optional
Sets the starting position in the color space. 0=blue, 1=red,
2=green. Defaults to 0.5.
rot : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
correspond to Blue->Red direction. Defaults to -1.5
gamma : scalar, optional
The gamma correction for intensity. Defaults to 1.0
reverse : boolean, optional
Set to True to reverse the color map. Will go from black to
white. Good for density plots where shade~density. Defaults to
False
nlev : scalar, optional
Defines the number of discrete levels to render colors at.
Defaults to 32.
sat : scalar, optional
The saturation intensity factor. Defaults to 1.2
NOTE: this was formerly known as "hue" parameter
minSat : scalar, optional
Sets the minimum-level saturation. Defaults to 1.2
maxSat : scalar, optional
Sets the maximum-level saturation. Defaults to 1.2
startHue : scalar, optional
Sets the starting color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in start parameter
endHue : scalar, optional
Sets the ending color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in rot parameter
minLight : scalar, optional
Sets the minimum lightness value. Defaults to 0.
maxLight : scalar, optional
Sets the maximum lightness value. Defaults to 1.
"""
super(CubeHelixColormap, self).__init__(
cubehelix(start=start, rot=rot, gamma=gamma, reverse=reverse,
nlev=nlev, minSat=minSat, maxSat=maxSat,
minLight=minLight, maxLight=maxLight, **kwargs))
class _Fire(BaseColormap):
colors = [(1.0, 1.0, 1.0, 1.0),
(1.0, 1.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0)]
glsl_map = """
vec4 fire(float t) {
return mix(mix($color_0, $color_1, t),
mix($color_1, $color_2, t*t), t);
}
"""
def map(self, t):
a, b, d = self.colors.rgba
c = _mix_simple(a, b, t)
e = _mix_simple(b, d, t**2)
return _mix_simple(c, e, t)
class _Grays(BaseColormap):
glsl_map = """
vec4 grays(float t) {
return vec4(t, t, t, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, t, np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, t, 1.0], dtype=np.float32)
class _Ice(BaseColormap):
glsl_map = """
vec4 ice(float t) {
return vec4(t, t, 1.0, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, np.ones(t.shape),
np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, 1.0, 1.0], dtype=np.float32)
class _Hot(BaseColormap):
colors = [(0., .33, .66, 1.0),
(.33, .66, 1., 1.0)]
glsl_map = """
vec4 hot(float t) {
return vec4(smoothstep($color_0.rgb, $color_1.rgb, vec3(t, t, t)),
1.0);
}
"""
def map(self, t):
rgba = self.colors.rgba
smoothed = smoothstep(rgba[0, :3], rgba[1, :3], t)
return np.hstack((smoothed, np.ones((len(t), 1))))
class _Winter(BaseColormap):
colors = [(0.0, 0.0, 1.0, 1.0),
(0.0, 1.0, 0.5, 1.0)]
glsl_map = """
vec4 winter(float t) {
return mix($color_0, $color_1, sqrt(t));
}
"""
def map(self, t):
return _mix_simple(self.colors.rgba[0],
self.colors.rgba[1],
np.sqrt(t))
class _SingleHue(Colormap):
"""A colormap which is solely defined by the given hue and value.
Given the color hue and value, this color map increases the saturation
of a color. The start color is almost white but still contains a hint of
the given color, and at the end the color is fully saturated.
Parameters
----------
hue : scalar, optional
The hue refers to a "true" color, without any shading or tinting.
Must be in the range [0, 360]. Defaults to 200 (blue).
saturation_range : array-like, optional
The saturation represents how "pure" a color is. Less saturation means
more white light mixed in the color. A fully saturated color means
the pure color defined by the hue. No saturation means completely
white. This colormap changes the saturation, and with this parameter
    you can specify the lower and upper bound. Default is [0.1, 0.8].
value : scalar, optional
The value defines the "brightness" of a color: a value of 0.0 means
completely black while a value of 1.0 means the color defined by the
hue without shading. Must be in the range [0, 1.0]. The default value
is 1.0.
Notes
-----
For more information about the hue values see the `wikipedia page`_.
.. _wikipedia page: https://en.wikipedia.org/wiki/Hue
"""
def __init__(self, hue=200, saturation_range=[0.1, 0.8], value=1.0):
colors = ColorArray([
(hue, saturation_range[0], value),
(hue, saturation_range[1], value)
], color_space='hsv')
super(_SingleHue, self).__init__(colors)
class _HSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
a circular color space.
This means that we change the hue value while keeping the
saturation and value constant.
Parameters
    ----------
    ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value (brightness) component of the colors to generate. Must
be in the range [0, 1.0], and the default is 1.0
controls : array-like, optional
The list of control points for the colors to generate. It should be
        an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=1.0,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
colors = ColorArray([(hue, saturation, value) for hue in hues],
color_space='hsv')
super(_HSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
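# Illustrative sketch (not part of the library): the hue spacing computed in
# ``_HSL.__init__`` above, shown stand-alone with numpy:
#
#   ncolors, hue_start = 6, 30
#   hues = (np.linspace(0, 360, ncolors + 1)[:-1] + hue_start) % 360
#   # -> array([ 30.,  90., 150., 210., 270., 330.])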
class _HUSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
the HUSL hue space.
Parameters
    ----------
    ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
        The value (brightness) component of the colors to generate. Must
        be in the range [0, 1.0], and the default is 0.7.
controls : array-like, optional
The list of control points for the colors to generate. It should be
        an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Notes
-----
For more information about HUSL colors see http://husl-colors.org
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=0.7,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
saturation *= 99
value *= 99
colors = ColorArray(
[husl_to_rgb(hue, saturation, value) for hue in hues],
)
super(_HUSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
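# Illustrative sketch (not part of the library): ``_HUSL`` rescales saturation
# and value from [0, 1] to [0, 99] before handing them to ``husl_to_rgb``
# (imported earlier in this module). With the defaults above this is roughly:
#
#   hues = np.linspace(0, 360, 6 + 1)[:-1] % 360
#   rgb = [husl_to_rgb(hue, 1.0 * 99, 0.7 * 99) for hue in hues]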
class _Diverging(Colormap):
def __init__(self, h_pos=20, h_neg=250, saturation=1.0, value=0.7,
center="light"):
saturation *= 99
value *= 99
start = husl_to_rgb(h_neg, saturation, value)
mid = ((0.133, 0.133, 0.133) if center == "dark" else
(0.92, 0.92, 0.92))
end = husl_to_rgb(h_pos, saturation, value)
colors = ColorArray([start, mid, end])
super(_Diverging, self).__init__(colors)
# https://github.com/matplotlib/matplotlib/pull/4707/files#diff-893cf0348279e9f4570488a7a297ab1eR774
# Taken from original Viridis colormap data in matplotlib implementation
# The full 256-sample data set is stored below; the 'viridis' preset subsamples
# it to 128 points (via ``[::2]``) since 256 points causes VisPy to freeze.
# HACK: Ideally, all 256 points should be included, with VisPy generating
# a 1D texture lookup for ColorMap, rather than branching code.
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
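# Quick sanity sketch (not part of the library): the table above holds all 256
# viridis samples; the 'viridis' preset below keeps every other row.
#
#   np.asarray(_viridis_data).shape        # -> (256, 3)
#   np.asarray(_viridis_data[::2]).shape   # -> (128, 3)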
_colormaps = dict(
# Some colormap presets
autumn=Colormap([(1., 0., 0., 1.), (1., 1., 0., 1.)]),
blues=Colormap([(1., 1., 1., 1.), (0., 0., 1., 1.)]),
cool=Colormap([(0., 1., 1., 1.), (1., 0., 1., 1.)]),
greens=Colormap([(1., 1., 1., 1.), (0., 1., 0., 1.)]),
reds=Colormap([(1., 1., 1., 1.), (1., 0., 0., 1.)]),
spring=Colormap([(1., 0., 1., 1.), (1., 1., 0., 1.)]),
summer=Colormap([(0., .5, .4, 1.), (1., 1., .4, 1.)]),
fire=_Fire(),
grays=_Grays(),
hot=_Hot(),
ice=_Ice(),
winter=_Winter(),
light_blues=_SingleHue(),
orange=_SingleHue(hue=35),
viridis=Colormap(ColorArray(_viridis_data[::2])),
# Diverging presets
coolwarm=Colormap(ColorArray(
[
(226, 0.59, 0.92), (222, 0.44, 0.99), (218, 0.26, 0.97),
(30, 0.01, 0.87),
(20, 0.3, 0.96), (15, 0.5, 0.95), (8, 0.66, 0.86)
],
color_space="hsv"
)),
PuGr=_Diverging(145, 280, 0.85, 0.30),
GrBu=_Diverging(255, 133, 0.75, 0.6),
GrBu_d=_Diverging(255, 133, 0.75, 0.6, "dark"),
RdBu=_Diverging(220, 20, 0.75, 0.5),
# Configurable colormaps
cubehelix=CubeHelixColormap,
single_hue=_SingleHue,
hsl=_HSL,
husl=_HUSL,
diverging=_Diverging
)
def get_colormap(name, *args, **kwargs):
"""Obtain a colormap
Some colormaps can have additional configuration parameters. Refer to
their corresponding documentation for more information.
Parameters
----------
name : str | Colormap
Colormap name. Can also be a Colormap for pass-through.
Examples
--------
>>> get_colormap('autumn')
>>> get_colormap('single_hue', hue=10)
"""
if isinstance(name, BaseColormap):
cmap = name
else:
if not isinstance(name, string_types):
raise TypeError('colormap must be a Colormap or string name')
if name not in _colormaps:
raise KeyError('colormap name %s not found' % name)
cmap = _colormaps[name]
if inspect.isclass(cmap):
cmap = cmap(*args, **kwargs)
return cmap
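# Illustrative usage sketch (not part of the library): preset entries in
# ``_colormaps`` are colormap instances and are returned as-is, while the
# configurable entries (``cubehelix``, ``single_hue``, ``hsl``, ``husl``,
# ``diverging``) are classes that ``get_colormap`` instantiates on demand:
#
#   get_colormap('autumn')              # preset instance, returned unchanged
#   get_colormap('hsl', ncolors=8)      # class entry, instantiated with kwargs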
def get_colormaps():
"""Return the list of colormap names."""
return _colormaps.copy()
| bsd-3-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/core/indexes/multi.py | 1 | 133109 | from functools import wraps
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike, DtypeObj, Label, Scalar, Shape
from pandas.compat.numpy import function as nv
from pandas.errors import InvalidIndexError, PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCDatetimeIndex, ABCTimedeltaIndex
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
ensure_index,
get_unanimous_names,
)
from pandas.core.indexes.frozen import FrozenList
from pandas.core.indexes.numeric import Int64Index
import pandas.core.missing as missing
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import Series
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
{"klass": "MultiIndex", "target_klass": "MultiIndex or list of tuples"}
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
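# Worked numeric sketch (not part of pandas): shifting each level's codes by its
# offset and OR-ing across the columns packs each row of codes into one uint64.
#
#   codes = np.array([[1, 2], [0, 3]], dtype=np.uint64)   # two keys, two levels
#   offsets = np.array([2, 0], dtype=np.uint64)           # level 1 occupies 2 bits
#   np.bitwise_or.reduce(codes << offsets, axis=1)        # -> array([6, 3], dtype=uint64)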
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
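# Worked sketch with plain Python ints (not part of pandas): when the combined
# bit width exceeds 64, the same shift-and-OR packing runs on Python integers,
# which cannot overflow.
#
#   codes, offsets = [3, 5], [70, 0]
#   key = 0
#   for code, offset in zip(codes, offsets):
#       key |= code << offset          # 3 << 70 is far beyond the uint64 range
#   # key == (3 << 70) | 5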
def names_compat(meth):
"""
A decorator to allow either `name` or `names` keyword but not both.
This makes it easier to share code with base class.
"""
@wraps(meth)
def new_meth(self_or_cls, *args, **kwargs):
if "name" in kwargs and "names" in kwargs:
raise TypeError("Can only provide one of `names` and `name`")
elif "name" in kwargs:
kwargs["names"] = kwargs.pop("name")
return meth(self_or_cls, *args, **kwargs)
return new_meth
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`_
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
sortorder: Optional[int]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(MultiIndex)
result._cache = {}
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
result._reset_identity()
return result
def _validate_codes(self, level: List, code: List):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(
self, codes: Optional[List] = None, levels: Optional[List] = None
):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
        # NOTE: Currently does not check, among other things, that the cached
        # nlevels is consistent, nor that sortorder matches the actual sort order.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > self._lexsort_depth():
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {self._lexsort_depth()}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> "MultiIndex":
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
@names_compat
def from_tuples(
cls,
tuples,
sortorder: Optional[int] = None,
names: Optional[Sequence[Label]] = None,
):
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
arrays: List[Sequence[Label]]
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = zip(*tuples)
return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=lib.no_default):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@cache_readonly
def _values(self):
# We override here, since our parent uses _data, which we don't use.
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals.dtype):
vals = vals._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or isinstance(
vals, (ABCDatetimeIndex, ABCTimedeltaIndex)
):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
arr = lib.fast_zip(values)
return arr
@property
def values(self):
return self._values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
# overriding the base Index.shape definition to avoid materializing
# the values (GH-27384, GH-27775)
return (len(self),)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self):
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [
x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self,
levels,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._shallow_copy() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels_list = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels_list[lev_num] = ensure_index(lev, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels_list)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._reset_cache()
def set_levels(self, levels, level=None, inplace=None, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [
... (1, "one"),
... (1, "two"),
... (2, "one"),
... (2, "two"),
... (3, "one"),
... (3, "two")
... ],
... names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two'),
(3, 'one'),
(3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.nlevels
3
"""
return len(self._levels)
@property
def levshape(self):
"""
A tuple with the length of each level.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.levshape
(1, 1, 1)
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self,
codes,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes_list = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes_list[lev_num] = _coerce_indexer_frozen(
level_codes, lev, copy=copy
)
new_codes = FrozenList(new_codes_list)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._reset_cache()
def set_codes(self, codes, level=None, inplace=None, verify_integrity=True):
"""
Set new codes on MultiIndex. Defaults to returning new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool (default True)
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
if is_list_like(codes[0]):
raise TypeError("Codes must be list-like")
level = [level]
codes = [codes]
elif level is None or is_list_like(level):
if not is_list_like(codes) or not is_list_like(codes[0]):
raise TypeError("Codes must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
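    # Worked sketch (not part of pandas) of the bit bookkeeping above, for two
    # levels of lengths 3 and 2:
    #
    #   sizes    = np.ceil(np.log2([3 + 1, 2 + 1]))        # -> [2., 2.]
    #   lev_bits = np.cumsum(sizes[::-1])[::-1]            # -> [4., 2.]
    #   offsets  = np.concatenate([lev_bits[1:], [0]])     # -> [2., 0.]
    #   # lev_bits[0] == 4 <= 64, so the uint64-backed engine is chosen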
@property
def _constructor(self):
return type(self).from_tuples
@doc(Index._shallow_copy)
def _shallow_copy(self, values=None, name=lib.no_default):
names = name if name is not lib.no_default else self.names
if values is not None:
return type(self).from_tuples(values, sortorder=None, names=names)
result = type(self)(
levels=self.levels,
codes=self.codes,
sortorder=None,
names=names,
verify_integrity=False,
)
result._cache = self._cache.copy()
result._cache.pop("levels", None) # GH32669
return result
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
.. deprecated:: 1.2.0
levels : sequence, optional
.. deprecated:: 1.2.0
codes : sequence, optional
.. deprecated:: 1.2.0
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if levels is not None:
warnings.warn(
"parameter levels is deprecated and will be removed in a future "
"version. Use the set_levels method instead.",
FutureWarning,
stacklevel=2,
)
if codes is not None:
warnings.warn(
"parameter codes is deprecated and will be removed in a future "
"version. Use the set_codes method instead.",
FutureWarning,
stacklevel=2,
)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
levels = levels if levels is not None else self.levels
codes = codes if codes is not None else self.codes
new_index = type(self)(
levels=levels,
codes=codes,
sortorder=self.sortorder,
names=names,
verify_integrity=False,
)
new_index._cache = self._cache.copy()
new_index._cache.pop("levels", None) # GH32669
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
""" return a boolean if we need a qualified .info display """
def f(level):
return "mixed" in level or "string" in level or "unicode" in level
return any(f(level) for level in self._inferred_type_levels)
@doc(Index.memory_usage)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi._values
def format(
self,
name: Optional[bool] = None,
formatter: Optional[Callable] = None,
na_rep: Optional[str] = None,
names: bool = False,
space: int = 2,
sparsify=None,
adjoin: bool = True,
) -> List:
if name is not None:
names = name
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_1d(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, lev_name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(lev_name, escape_chars=("\t", "\r", "\n"))
if lev_name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547 use value of sparsify as sentinel if it's "Falsey"
assert isinstance(sparsify, bool) or sparsify is lib.no_default
if sparsify in [False, lib.no_default]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = sparsify_labels(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import get_adjustment
adj = get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
# --------------------------------------------------------------------
# Names Methods
def _get_names(self):
return FrozenList(self._names)
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
        TypeError if any name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
# pandas\core\indexes\multi.py:1448: error: Cannot determine type
# of '__setitem__' [has-type]
self._names[lev] = name # type: ignore[has-type]
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names,
fget=_get_names,
doc="""
Names of levels in MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.names
FrozenList(['x', 'y', 'z'])
""",
)
# --------------------------------------------------------------------
@doc(Index._get_grouper_for_level)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
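    # Illustrative sketch (not part of pandas; exercises the private helper above):
    #
    #   mi = MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["num", "let"])
    #   mi._get_level_number("let")   # -> 1 (resolved by name)
    #   mi._get_level_number(-1)      # -> 1 (negative numbers count from the end)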
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
        Return True if the index is monotonic increasing, i.e. each value
        is greater than or equal to the previous one.
"""
if any(-1 in code for code in self.codes):
return False
if all(level.is_monotonic for level in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self._values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
        Return True if the index is monotonic decreasing, i.e. each value
        is less than or equal to the previous one.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@doc(Index.duplicated)
def duplicated(self, keep="first"):
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
        raise NotImplementedError("fillna is not defined for MultiIndex")
@doc(Index.dropna)
def dropna(self, how="any"):
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.set_codes(codes=new_codes)
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
unique : bool, default False
if True, drop duplicated values
Returns
-------
values : ndarray
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level.
Length of returned vector is equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@doc(Index.unique)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def to_frame(self, index=True, name=None):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=['x', 'y'])
>>> df
x y
a c a c
b d b d
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self):
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self._values, tupleize_cols=False)
@property
def _is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
Examples
--------
In the below examples, the first level of the MultiIndex is sorted because
a<b<c, so there is no need to look at the next level.
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()
True
In case there is a tie, the lexicographical sorting looks
at the next level of the MultiIndex.
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()
False
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()
False
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
return self.sortorder
return self._lexsort_depth()
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
def _sort_levels_monotonic(self):
"""
This is an *internal* function.
Create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_1d(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self):
"""
Create new MultiIndex from current that removes unused levels.
Unused level(s) means levels that are not expressed in the
labels. The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will
also be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = {
"levels": list(self.levels),
"codes": list(self.codes),
"sortorder": self.sortorder,
"names": list(self.names),
}
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key, warn_float=True)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
na_value = -1
if allow_fill:
taken = [lab.take(indices) for lab in self.codes]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.codes]
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def append(self, other):
"""
        Append a collection of Index objects together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
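        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]])
        >>> mi.append(mi)
        MultiIndex([('a', 1),
                    ('b', 2),
                    ('a', 1),
                    ('b', 2)],
                   )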
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self._values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
return self._values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat((), {"axis": axis})
repeats = ensure_platform_int(repeats)
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for MultiIndex operations")
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples when level is not specified
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
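        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1, 2, 1]])
        >>> mi.drop([('a', 2)])
        MultiIndex([('a', 1),
                    ('b', 1)],
                   )
        >>> mi.drop('a', level=0)
        MultiIndex([('b', 1)],
                   )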
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise"):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
        # If NaN should be dropped, it will equal -1 here. Values that are not
        # NaN but still equal -1 are missing from the index.
nan_codes = isna(codes)
values[(np.equal(nan_codes, False)) & (values == -1)] = -2
if index.shape[0] == self.shape[0]:
values[np.equal(nan_codes, True)] = -2
not_found = codes[values == -2]
if len(not_found) != 0 and errors != "ignore":
raise KeyError(f"labels {not_found} not found in level")
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
>>> mi.reorder_levels(order=['y', 'x'])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self):
"""
we are categorizing our codes by using the
available categories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
        a valid value
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level.
The result will respect the original ordering of the associated
factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
        sort_remaining : bool, default True
            If True, also sort by the remaining levels after ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])
>>> mi
MultiIndex([(0, 2),
(0, 1)],
)
>>> mi.sortlevel()
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(sort_remaining=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
>>> mi.sortlevel(1)
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(1, ascending=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
            # Pop in reverse-sorted order so that removing larger indices
            # does not shift the smaller indices still to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase.ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", return_indexers=True, keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def _check_indexing_error(self, key):
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
def _should_fallback_to_positional(self) -> bool:
"""
Should integer key(s) be treated as positional?
"""
# GH#33355
return self.levels[0]._should_fallback_to_positional()
def _get_values_for_loc(self, series: "Series", loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
if len(new_values) == 1 and not self.nlevels > 1:
# If more than one level left, we can not return a scalar
return new_values[0]
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = (key,) + (slice(None),) * (len(self.levels) - 1)
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self._values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
if not self.is_unique:
raise ValueError("Reindexing only valid with uniquely valued Index objects")
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
indexer = self._engine.get_indexer(
values=self._values, target=target, method=method, limit=limit
)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target)
return ensure_platform_int(indexer)
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left", kind="loc")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right", kind="loc")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if isinstance(idx, slice) and k < n - 1:
# Get start and end value from slice, necessary when a non-integer
# interval is given as input GH#37707
start = idx.start
end = idx.stop
elif k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
elif isinstance(idx, slice):
idx = idx.start
return start + section.searchsorted(idx, side=side)
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
        If key is an NA value, the location is unified to -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels.
The location is returned as an integer/slice or boolean
mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
hash(key)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get location and sliced index for requested label(s)/level(s).
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
if not isinstance(level, (list, tuple)):
level = self._get_level_number(level)
else:
level = [self._get_level_number(lev) for lev in level]
return self._get_loc_level(key, level=level, drop_level=drop_level)
def _get_loc_level(
self, key, level: Union[int, List[int]] = 0, drop_level: bool = True
):
"""
get_loc_level but with `level` known to be positional, not name-based.
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
for i in sorted(levels, reverse=True):
try:
new_index = new_index._drop_level_numbers([i])
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self._get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level: int = 0, indexer=None):
# `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
if step is not None and step < 0:
# Switch elements for negative step size
start, stop = stop - 1, start - 1
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
                # We have an indexer that maps locations within the labels we
                # have already selected (it is not an indexer for the entire
                # set). Examining every location would be wasteful, so we only
                # look at locations in this subset and then map the result
                # back onto the selected set.
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)
m = np.asarray(m)
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
elif isinstance(start, slice):
stop = len(level_index)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
                # note that the stop ALREADY includes the stopped point (if
                # it was sliced with a string)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # we need right-search semantics here, like when using a slice,
                # so include stop + 1 (so that stop itself is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == idx, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
if isinstance(idx, slice):
start = idx.start
end = idx.stop
else:
start = level_codes.searchsorted(idx, side="left")
end = level_codes.searchsorted(idx, side="right")
if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(start, end)
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self.lexsort_depth}"
)
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r) -> Int64Index:
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(
idxr: Optional[Index], indexer: Optional[Index], key
) -> Index:
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
indexer_intersection = indexer.intersection(idxr)
if indexer_intersection.empty and not idxr.empty and not indexer.empty:
raise KeyError(key)
return indexer_intersection
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(
_convert_to_indexer(k), indexer=indexer, key=seq
)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers: Optional[Int64Index] = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = (idxrs if indexers is None else indexers).union(
idxrs, sort=False
)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer, key=seq)
else:
# no matches we are done
return np.array([], dtype=np.int64)
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer, key=seq)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
key=seq,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
key=seq,
)
# empty indexer
if indexer is None:
return np.array([], dtype=np.int64)
assert isinstance(indexer, Int64Index), type(indexer)
indexer = self._reorder_indexer(seq, indexer)
return indexer._values
# --------------------------------------------------------------------
def _reorder_indexer(
self,
seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...],
indexer: Int64Index,
) -> Int64Index:
"""
Reorder an indexer of a MultiIndex (self) so that the label are in the
same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list_like label in seq are sorted
# then we do not need to sort
if self.is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
elif isinstance(k, slice) and k.step is not None and k.step < 0:
need_sort = True
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: Tuple[np.ndarray, ...] = ()
        # For each level of the sequence in seq, map the level codes to the
        # order in which they appear in the list-like sequence.
        # This mapping is then used to reorder the indexer.
for i, k in enumerate(seq):
if is_scalar(k):
# GH#34603 we want to treat a scalar the same as an all equal list
k = [k]
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
elif isinstance(k, slice) and k.step is not None and k.step < 0:
new_order = np.arange(n)[k][indexer]
elif isinstance(k, slice) and k.start is None and k.stop is None:
# slice(None) should not determine order GH#31330
new_order = np.ones((n,))[indexer]
else:
                # For all other cases, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
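        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi = pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['x', 'y', 'z']])
        >>> mi.truncate(before='a', after='b')
        MultiIndex([('a', 'x'),
                    ('b', 'y')],
                   )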
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=self._names,
verify_integrity=False,
)
def equals(self, other: object) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
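        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi1 = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
        >>> mi2 = pd.MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
        >>> mi1.equals(mi2)
        True
        >>> mi1.equals(pd.MultiIndex.from_tuples([(1, 'a'), (2, 'c')]))
        False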
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not is_object_dtype(other.dtype):
# other cannot contain tuples, so cannot match self
return False
return array_equivalent(self._values, other._values)
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(
np.asarray(self.levels[i]._values), self_codes, allow_fill=False
)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values), other_codes, allow_fill=False
)
            # Since we use NaT for both datetime64 and timedelta64, a level may
            # be typed, say, timedelta64 in self (i.e. it has values other than
            # NaT) but datetime64 in other (where it is all NaT); these are
            # still equivalent.
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
Index
Examples
--------
>>> idx1 = pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx2 = pd.MultiIndex.from_arrays(
... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
... )
>>> idx2
MultiIndex([(3, 'Red'),
(3, 'Green'),
(2, 'Red'),
(2, 'Green')],
)
>>> idx1.union(idx2)
MultiIndex([(1, 'Blue'),
(1, 'Red'),
(2, 'Blue'),
(2, 'Green'),
(2, 'Red'),
(3, 'Green'),
(3, 'Red')],
)
>>> idx1.union(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue'),
(3, 'Red'),
(3, 'Green'),
(2, 'Green')],
)
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self.rename(result_names)
return self._union(other, sort=sort)
def _union(self, other, sort):
other, result_names = self._convert_can_do_setop(other)
# TODO: Index.union returns other when `len(self)` is 0.
if not is_object_dtype(other.dtype):
raise NotImplementedError(
"Can only union MultiIndex with MultiIndex or Index of tuples, "
"try mi.to_flat_index().union(other) instead."
)
uniq_tuples = lib.fast_unique_multiple([self._values, other._values], sort=sort)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
def intersection(self, other, sort=False):
"""
Form the intersection of two MultiIndex objects.
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default False
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
behaviour from before 0.24.0
Returns
-------
Index
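        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi1 = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
        >>> mi2 = pd.MultiIndex.from_arrays([[2, 3], ['b', 'c']])
        >>> mi1.intersection(mi2)
        MultiIndex([(2, 'b')],
                   )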
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
if self.has_duplicates:
return self.unique().rename(result_names)
return self.rename(result_names)
return self._intersection(other, sort=sort)
def _intersection(self, other, sort=False):
other, result_names = self._convert_can_do_setop(other)
if not self._is_comparable_dtype(other.dtype):
# The intersection is empty
return self[:0].rename(result_names)
lvals = self._values
rvals = other._values
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
inner_tuples = self._inner_indexer(lvals, rvals)[0]
sort = False # inner_tuples is already sorted
except TypeError:
pass
else:
uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
other_uniq = set(rvals)
seen = set()
# pandas\core\indexes\multi.py:3503: error: "add" of "set" does not
# return a value [func-returns-value]
uniq_tuples = [
x
for x in lvals
if x in other_uniq
and not (x in seen or seen.add(x)) # type: ignore[func-returns-value]
]
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
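        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi1 = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
        >>> mi2 = pd.MultiIndex.from_arrays([[2], ['b']])
        >>> mi1.difference(mi2)
        MultiIndex([(1, 'a')],
                   )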
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self.rename(result_names)
if self.equals(other):
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this._values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not isinstance(other, Index):
if len(other) == 0:
return self[:0], self.names
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
else:
result_names = get_unanimous_names(self, other)
return other, result_names
def symmetric_difference(self, other, result_name=None, sort=None):
# On equal symmetric_difference MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
tups = Index.symmetric_difference(self, other, result_name, sort)
if len(tups) == 0:
return type(self)(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
names=tups.name,
)
return type(self).from_tuples(tups, names=tups.name)
# --------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
"Setting a MultiIndex dtype to anything other than object "
"is not supported"
)
elif copy is True:
return self._shallow_copy()
return self
def _validate_fill_value(self, item):
if not isinstance(item, tuple):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
return item
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
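        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]])
        >>> mi.insert(0, ('c', 3))
        MultiIndex([('c', 3),
                    ('a', 1),
                    ('b', 2)],
                   )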
"""
item = self._validate_fill_value(item)
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
try:
level = level.insert(lev_loc, k)
except TypeError:
# TODO: Should this be done inside insert?
# TODO: smarter casting rules?
level = level.astype(object).insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
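        Examples
        --------
        Illustrative example with hypothetical data:
        >>> mi = pd.MultiIndex.from_arrays([['a', 'b', 'c'], [1, 2, 3]])
        >>> mi.delete(1)
        MultiIndex([('a', 1),
                    ('c', 3)],
                   )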
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
@doc(Index.isin)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values, names=self.names)._values
return algos.isin(self._values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
# ---------------------------------------------------------------
# Arithmetic/Numeric Methods - Disabled
__add__ = make_invalid_op("__add__")
__radd__ = make_invalid_op("__radd__")
__iadd__ = make_invalid_op("__iadd__")
__sub__ = make_invalid_op("__sub__")
__rsub__ = make_invalid_op("__rsub__")
__isub__ = make_invalid_op("__isub__")
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# Unary methods disabled
__neg__ = make_invalid_op("__neg__")
__pos__ = make_invalid_op("__pos__")
__abs__ = make_invalid_op("__abs__")
__inv__ = make_invalid_op("__inv__")
def sparsify_labels(label_list, start: int = 0, sentinel=""):
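    """
    Blank out labels that repeat the previous row so a formatted MultiIndex
    displays sparsely: ``label_list`` holds the labels per level; within each
    row, leading entries equal to the previous row are replaced with
    ``sentinel`` (the last level is always kept), and the result is returned
    per level again.
    """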
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index, key):
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index._drop_level_numbers([0])
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index._drop_level_numbers([0])
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
| gpl-2.0 |
ininex/geofire-python | resource/lib/python2.7/site-packages/gcloud/monitoring/_dataframe.py | 7 | 4353 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time series as :mod:`pandas` dataframes."""
import itertools
TOP_RESOURCE_LABELS = (
'project_id',
'aws_account',
'location',
'region',
'zone',
)
def _build_dataframe(time_series_iterable,
label=None, labels=None): # pragma: NO COVER
"""Build a :mod:`pandas` dataframe out of time series.
:type time_series_iterable:
iterable over :class:`~gcloud.monitoring.timeseries.TimeSeries`
:param time_series_iterable:
An iterable (e.g., a query object) yielding time series.
:type label: string or None
:param label:
The label name to use for the dataframe header. This can be the name
of a resource label or metric label (e.g., ``"instance_name"``), or
the string ``"resource_type"``.
:type labels: list of strings, or None
:param labels:
A list or tuple of label names to use for the dataframe header.
If more than one label name is provided, the resulting dataframe
will have a multi-level column header.
    Specifying neither ``label`` nor ``labels`` results in a dataframe
with a multi-level column header including the resource type and
all available resource and metric labels.
Specifying both ``label`` and ``labels`` is an error.
:rtype: :class:`pandas.DataFrame`
:returns: A dataframe where each column represents one time series.
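    Example (illustrative; assumes ``query`` is an iterable of time series,
    such as an executed monitoring query)::
        dataframe = _build_dataframe(query, labels=['resource_type', 'zone'])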
"""
import pandas # pylint: disable=import-error
if labels is not None:
if label is not None:
raise ValueError('Cannot specify both "label" and "labels".')
elif not labels:
raise ValueError('"labels" must be non-empty or None.')
columns = []
headers = []
for time_series in time_series_iterable:
pandas_series = pandas.Series(
data=[point.value for point in time_series.points],
index=[point.end_time for point in time_series.points],
)
columns.append(pandas_series)
headers.append(time_series.header())
# Implement a smart default of using all available labels.
if label is None and labels is None:
resource_labels = set(itertools.chain.from_iterable(
header.resource.labels for header in headers))
metric_labels = set(itertools.chain.from_iterable(
header.metric.labels for header in headers))
labels = (['resource_type'] +
_sorted_resource_labels(resource_labels) +
sorted(metric_labels))
# Assemble the columns into a DataFrame.
dataframe = pandas.DataFrame.from_records(columns).T
# Convert the timestamp strings into a DatetimeIndex.
dataframe.index = pandas.to_datetime(dataframe.index)
# Build a multi-level stack of column headers. Some labels may
# be undefined for some time series.
levels = []
for key in labels or [label]:
level = [header.labels.get(key, '') for header in headers]
levels.append(level)
# Build a column Index or MultiIndex. Do not include level names
# in the column header if the user requested a single-level header
# by specifying "label".
dataframe.columns = pandas.MultiIndex.from_arrays(
levels,
names=labels or None)
# Sort the rows just in case (since the API doesn't guarantee the
# ordering), and sort the columns lexicographically.
return dataframe.sort_index(axis=0).sort_index(axis=1)
def _sorted_resource_labels(labels):
"""Sort label names, putting well-known resource labels first."""
head = [label for label in TOP_RESOURCE_LABELS if label in labels]
tail = sorted(label for label in labels
if label not in TOP_RESOURCE_LABELS)
return head + tail
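# --- illustrative check (not part of the original module) ---
# A small, hypothetical example of the ordering rule implemented above:
# well-known resource labels keep their TOP_RESOURCE_LABELS order, and
# everything else follows alphabetically.
def _demo_sorted_resource_labels():
    example = ['instance_name', 'zone', 'project_id', 'device']
    assert _sorted_resource_labels(example) == [
        'project_id', 'zone', 'device', 'instance_name']
    return _sorted_resource_labels(example)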
| mit |
sragain/pcmc-nips | infer.py | 1 | 3056 | import numpy as np
import matplotlib.pyplot as plt
from random import random
import lib.mnl_utils,lib.mmnl_utils,lib.pcmc_utils
import pickle
import os,sys
def split_samples(samples,nep,split=.25,alpha=.1):
"""splits a list of samples into nep dictionaries containing their summary
statistics
Arguments:
samples- list of (Set, choice) tuples
nep- number of ways to split input data
	split- proportion of data assigned as test data
alpha- amount of additive smoothing applied to data
"""
Ctest = {}
splitidx = int((1-split)*len(samples))
testsamples = samples[splitidx:]
for (S,choice) in testsamples:
if S not in Ctest:
Ctest[S]=np.zeros(len(S))
Ctest[S][choice]+=1
trainsamples = samples[:splitidx]
trainlist = [{} for i in range(nep)]
a = len(trainsamples)/nep
for i in range(nep):
for (S,choice) in trainsamples[i*a:(i+1)*a]:
if S not in trainlist[i]:
trainlist[i][S]=np.ones(len(S))*alpha
trainlist[i][S][choice]+=1
return trainlist,Ctest
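# --- illustrative sketch (not part of the original script) ---
# Hypothetical data showing what split_samples returns: choice sets are
# hashable tuples, and each returned dictionary maps a set to additively
# smoothed counts of how often each position was chosen.  Assumes the
# Python 2 environment the rest of this script targets.
def _demo_split_samples():
    S = (0, 1, 2)  # one choice set over items 0, 1, 2
    samples = [(S, 0), (S, 2), (S, 2), (S, 1)] * 5
    trainlist, Ctest = split_samples(samples, nep=2, split=.25, alpha=.1)
    return trainlist, Ctest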
def run_sims(samples,n=6,nsim=10,nep=5,maxiter=25,split=.25,alpha=.1):
"""
	computes learning error of the input models on the input data over nsim simulations,
	each consisting of training and computing test error on nep splits of the data
Arguments:
samples- list of samples
n- number of choices in union of choice sets
nsim- number of simulations to run
nep- number of episodes per simulation
maxiter- iterations allowed to scipy.minimize when performing MLE
split- proportion of samples used for testing
alpha- amount of additive smoothing applied
"""
mnl_errors=np.empty((nsim,nep))
mmnl_errors=np.empty((nsim,nep))
pcmc_errors=np.empty((nsim,nep))
for sim in range(nsim):
print 'sim number %d' %(sim+1)
np.random.shuffle(samples)
#throw away any inferred parameters
mnl_params = None;pcmc_params = None;mmnl_params = None
#split data
trainlist,Ctest = split_samples(samples,nep,split=split,alpha=alpha)
Ctrain={}
for ep in range(nep):
#add new training data
for S in trainlist[ep]:
if S not in Ctrain:
Ctrain[S]=trainlist[ep][S]
else:
Ctrain[S]+=trainlist[ep][S]
#infer parameters
mnl_params = lib.mnl_utils.ILSR(C=Ctrain,n=n)
mmnl_params = lib.mmnl_utils.infer(C=Ctrain,n=n,x=mmnl_params,maxiter=maxiter)
pcmc_params = lib.pcmc_utils.infer(C=Ctrain,x=pcmc_params,n=n,maxiter=maxiter,delta=1)
#track errors
mnl_errors[sim,ep]=lib.mnl_utils.comp_error(x=mnl_params,C=Ctest)
mmnl_errors[sim,ep]=lib.mmnl_utils.comp_error(x=mmnl_params,C=Ctest,n=n)
pcmc_errors[sim,ep]=lib.pcmc_utils.comp_error(x=pcmc_params,C=Ctest)
np.save('mnl_errors.npy',mnl_errors)
np.save('mmnl_errors.npy',mmnl_errors)
np.save('pcmc_errors.npy',pcmc_errors)
np.save('pcmc_params.npy',lib.pcmc_utils.comp_Q(pcmc_params))
if __name__=='__main__':
nsim=100;nep=15;n=6;alpha=.1;samples=pickle.load(open('worklist.p','rb'));split=.25
#nsim=100;nep=15;n=8;alpha=5;samples=pickle.load(open('shoplist.p','rb'));split=.25
run_sims(samples=samples,n=n,nsim=nsim,nep=nep,split=split,alpha=alpha)
| mit |
rbalda/neural_ocr | env/lib/python2.7/site-packages/mpl_toolkits/axes_grid1/axes_rgb.py | 3 | 7028 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
import sys
from .mpl_axes import Axes
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
def imshow_rgb(ax, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
class RGBAxesBase(object):
"""base class for a 4-panel imshow (RGB, R, G, B)
Layout:
+---------------+-----+
| | R |
+ +-----+
| RGB | G |
+ +-----+
| | B |
+---------------+-----+
Attributes
----------
_defaultAxesClass : matplotlib.axes.Axes
defaults to 'Axes' in RGBAxes child class.
No default in abstract base class
RGB : _defaultAxesClass
The axes object for the three-channel imshow
R : _defaultAxesClass
The axes object for the red channel imshow
G : _defaultAxesClass
The axes object for the green channel imshow
B : _defaultAxesClass
The axes object for the blue channel imshow
"""
def __init__(self, *kl, **kwargs):
"""
Parameters
----------
pad : float
fraction of the axes height to put as padding.
defaults to 0.0
add_all : bool
True: Add the {rgb, r, g, b} axes to the figure
defaults to True.
axes_class : matplotlib.axes.Axes
kl :
Unpacked into axes_class() init for RGB
kwargs :
Unpacked into axes_class() init for RGB, R, G, B axes
"""
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
try:
axes_class = kwargs.pop("axes_class", self._defaultAxesClass)
except AttributeError:
new_msg = ("A subclass of RGBAxesBase must have a "
"_defaultAxesClass attribute. If you are not sure which "
"axes class to use, consider using "
"mpl_toolkits.axes_grid1.mpl_axes.Axes.")
six.reraise(AttributeError, AttributeError(new_msg),
sys.exc_info()[2])
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self, line_color='w', marker_edge_color='w'):
"""Set the line color and ticks for the axes
Parameters
----------
line_color : any matplotlib color
marker_edge_color : any matplotlib color
"""
for ax1 in [self.RGB, self.R, self.G, self.B]:
ax1.axis[:].line.set_color(line_color)
ax1.axis[:].major_ticks.set_markeredgecolor(marker_edge_color)
def add_RGB_to_figure(self):
"""Add the red, green and blue axes to the RGB composite's axes figure
"""
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
"""Create the four images {rgb, r, g, b}
Parameters
----------
r : array-like
The red array
g : array-like
The green array
b : array-like
The blue array
kwargs : imshow kwargs
kwargs get unpacked into the imshow calls for the four images
Returns
-------
rgb : matplotlib.image.AxesImage
r : matplotlib.image.AxesImage
g : matplotlib.image.AxesImage
b : matplotlib.image.AxesImage
"""
ny, nx = r.shape
if not ((nx, ny) == g.shape == b.shape):
raise ValueError('Input shapes do not match.'
'\nr.shape = {}'
'\ng.shape = {}'
'\nb.shape = {}'
''.format(r.shape, g.shape, b.shape))
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
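# --- illustrative usage (not part of the original module) ---
# A minimal sketch of the intended workflow, with made-up data: create an
# RGBAxes panel on a figure and feed it three equally shaped channel arrays.
def _demo_rgb_axes():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.0)
    r = np.random.rand(16, 16)
    g = np.random.rand(16, 16)
    b = np.random.rand(16, 16)
    # draws the composite in the large panel and each channel on the right
    ax.imshow_rgb(r, g, b, origin="lower", interpolation="nearest")
    return fig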
| mit |
etkirsch/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seeks an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
JanNash/sms-tools | lectures/03-Fourier-properties/plots-code/fft-zero-phase.py | 24 | 1140 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
import sys
sys.path.append('../../../software/models/')
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512
M = 401
hN = N//2
hM = (M+1)//2
start = int(.8*fs)
xw = x[start-hM:start+hM-1] * np.hamming(M)
plt.figure(1, figsize=(9.5, 6.5))
plt.subplot(411)
plt.plot(np.arange(-hM, hM-1), xw, lw=1.5)
plt.axis([-hN, hN-1, min(xw), max(xw)])
plt.title('x (oboe-A4.wav), M = 401')
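# Zero-phase windowing: wrap the windowed segment so its centre sample lands
# at index 0 of the FFT buffer (second half first, first half at the end).
# This removes the linear-phase term, so pX below shows only the residual
# phase of the spectrum.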
fftbuffer = np.zeros(N)
fftbuffer[:hM] = xw[hM-1:]
fftbuffer[N-hM+1:] = xw[:hM-1]
plt.subplot(412)
plt.plot(np.arange(0, N), fftbuffer, lw=1.5)
plt.axis([0, N, min(xw), max(xw)])
plt.title('fftbuffer: N = 512')
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X)/N)
pX = np.unwrap(np.angle(X))
plt.subplot(413)
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN,hN-1,-100,max(mX)])
plt.title('mX')
plt.subplot(414)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,min(pX),max(pX)])
plt.title('pX')
plt.tight_layout()
plt.savefig('fft-zero-phase.png')
plt.show()
| agpl-3.0 |
devanshdalal/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
evidation-health/pymc3 | pymc3/examples/ARM5_4.py | 14 | 1026 | '''
Created on May 18, 2012
@author: jsalvatier
'''
import numpy as np
from pymc3 import *
import theano.tensor as t
import pandas as pd
wells = get_data_file('pymc3.examples', 'data/wells.dat')
data = pd.read_csv(wells, delimiter=u' ', index_col=u'id',
dtype={u'switch': np.int8})
data.dist /= 100
data.educ /= 4
col = data.columns
P = data[col[1:]]
P = P - P.mean()
P['1'] = 1
Pa = np.array(P)
with Model() as model:
effects = Normal(
'effects', mu=0, tau=100. ** -2, shape=len(P.columns))
p = sigmoid(dot(Pa, effects))
s = Bernoulli('s', p, observed=np.array(data.switch))
def run(n=3000):
if n == "short":
n = 50
with model:
# move the chain to the MAP which should be a good starting point
start = find_MAP()
H = model.fastd2logp() # find a good orientation using the hessian at the MAP
h = H(start)
step = HamiltonianMC(model.vars, h)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
| apache-2.0 |
florian-f/sklearn | sklearn/utils/tests/test_random.py | 20 | 3872 | from __future__ import division
import numpy as np
from scipy.misc import comb as combinations
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
    # Counting the number of combinations is not as good as counting the
    # number of permutations. However, it works with sampling algorithms
    # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
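# --- illustrative usage (not part of the original test module) ---
# A minimal, hypothetical sketch of the API under test: draw 5 distinct
# integers from range(100) with a fixed seed using the default 'auto'
# method selection.
def _demo_sample_without_replacement():
    s = sample_without_replacement(100, 5, random_state=0)
    assert len(set(s)) == 5
    return s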
| bsd-3-clause |
pkruskal/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, one technique
is to repeat the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
huzq/scikit-learn | sklearn/mixture/_gaussian_mixture.py | 2 | 28253 | """Gaussian Mixture Model."""
# Author: Wei Xue <[email protected]>
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ._base import BaseMixture, _check_shape
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.validation import _deprecate_positional_args
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
"""Check the user provided 'weights'.
Parameters
----------
weights : array-like of shape (n_components,)
The proportions of components of each mixture.
n_components : int
Number of components.
Returns
-------
weights : array, shape (n_components,)
"""
weights = check_array(weights, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(weights, (n_components,), 'weights')
# check range
if (any(np.less(weights, 0.)) or
any(np.greater(weights, 1.))):
raise ValueError("The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights), np.max(weights)))
# check normalization
if not np.allclose(np.abs(1. - np.sum(weights)), 0.):
raise ValueError("The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f" % np.sum(weights))
return weights
def _check_means(means, n_components, n_features):
"""Validate the provided 'means'.
Parameters
----------
means : array-like of shape (n_components, n_features)
The centers of the current components.
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
    means : array, shape (n_components, n_features)
"""
means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
_check_shape(means, (n_components, n_features), 'means')
return means
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be "
"positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T) and
np.all(linalg.eigvalsh(precision) > 0.)):
raise ValueError("'%s precision' should be symmetric, "
"positive-definite" % covariance_type)
def _check_precisions_full(precisions, covariance_type):
"""Check the precision matrices are symmetric and positive-definite."""
for prec in precisions:
_check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
"""Validate user provided precisions.
Parameters
----------
precisions : array-like
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : string
n_components : int
Number of components.
n_features : int
Number of features.
Returns
-------
precisions : array
"""
precisions = check_array(precisions, dtype=[np.float64, np.float32],
ensure_2d=False,
allow_nd=covariance_type == 'full')
precisions_shape = {'full': (n_components, n_features, n_features),
'tied': (n_features, n_features),
'diag': (n_components, n_features),
'spherical': (n_components,)}
_check_shape(precisions, precisions_shape[covariance_type],
'%s precision' % covariance_type)
_check_precisions = {'full': _check_precisions_full,
'tied': _check_precision_matrix,
'diag': _check_precision_positivity,
'spherical': _check_precision_positivity}
_check_precisions[covariance_type](precisions, covariance_type)
return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
"""Estimate the full covariance matrices.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features, n_features)
The covariance matrix of the current components.
"""
n_components, n_features = means.shape
covariances = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariances[k].flat[::n_features + 1] += reg_covar
return covariances
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied covariance matrix.
Parameters
----------
resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariance : array, shape (n_features, n_features)
The tied covariance matrix of the components.
"""
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariance = avg_X2 - avg_means2
covariance /= nk.sum()
covariance.flat[::len(covariance) + 1] += reg_covar
return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal covariance vectors.
Parameters
----------
    resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
covariances : array, shape (n_components, n_features)
The covariance vector of the current components.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
"""Estimate the spherical variance values.
Parameters
----------
    resp : array-like of shape (n_samples, n_components)
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
means : array-like of shape (n_components, n_features)
reg_covar : float
Returns
-------
variances : array, shape (n_components,)
The variance values of each components.
"""
return _estimate_gaussian_covariances_diag(resp, X, nk,
means, reg_covar).mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
"""Estimate the Gaussian distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data array.
resp : array-like of shape (n_samples, n_components)
The responsibilities for each data sample in X.
reg_covar : float
The regularization added to the diagonal of the covariance matrices.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
nk : array-like of shape (n_components,)
The numbers of data samples in the current components.
means : array-like of shape (n_components, n_features)
The centers of the current components.
covariances : array-like
The covariance matrix of the current components.
The shape depends of the covariance_type.
"""
nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
means = np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = {"full": _estimate_gaussian_covariances_full,
"tied": _estimate_gaussian_covariances_tied,
"diag": _estimate_gaussian_covariances_diag,
"spherical": _estimate_gaussian_covariances_spherical
}[covariance_type](resp, X, nk, means, reg_covar)
return nk, means, covariances
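# --- illustrative check (not part of the original module) ---
# A tiny, hypothetical sanity check of the M-step helper above: with a
# single component and unit responsibilities, the estimated mean and 'full'
# covariance reduce to the sample mean and the (biased) sample covariance
# plus reg_covar on the diagonal.
def _demo_estimate_gaussian_parameters():
    rng = np.random.RandomState(0)
    X = rng.randn(500, 2)
    resp = np.ones((500, 1))
    nk, means, covs = _estimate_gaussian_parameters(X, resp, 1e-6, 'full')
    assert np.allclose(means[0], X.mean(axis=0))
    assert np.allclose(covs[0], np.cov(X.T, bias=True) + 1e-6 * np.eye(2))
    return nk, means, covs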
def _compute_precision_cholesky(covariances, covariance_type):
"""Compute the Cholesky decomposition of the precisions.
Parameters
----------
covariances : array-like
The covariance matrix of the current components.
The shape depends of the covariance_type.
covariance_type : {'full', 'tied', 'diag', 'spherical'}
The type of precision matrices.
Returns
-------
precisions_cholesky : array-like
The cholesky decomposition of sample precisions of the current
components. The shape depends of the covariance_type.
"""
estimate_precision_error_message = (
"Fitting the mixture model failed because some components have "
"ill-defined empirical covariance (for instance caused by singleton "
"or collapsed samples). Try to decrease the number of components, "
"or increase reg_covar.")
if covariance_type == 'full':
n_components, n_features, _ = covariances.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(covariances):
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
elif covariance_type == 'tied':
_, n_features = covariances.shape
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(estimate_precision_error_message)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
lower=True).T
else:
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(estimate_precision_error_message)
precisions_chol = 1. / np.sqrt(covariances)
return precisions_chol
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
"""Compute the log-det of the cholesky decomposition of matrices.
Parameters
----------
matrix_chol : array-like
Cholesky decompositions of the matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
n_features : int
Number of features.
Returns
-------
log_det_precision_chol : array-like of shape (n_components,)
The determinant of the precision matrix for each component.
"""
if covariance_type == 'full':
n_components, _, _ = matrix_chol.shape
log_det_chol = (np.sum(np.log(
matrix_chol.reshape(
n_components, -1)[:, ::n_features + 1]), 1))
elif covariance_type == 'tied':
log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
elif covariance_type == 'diag':
log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
else:
log_det_chol = n_features * (np.log(matrix_chol))
return log_det_chol
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
    # log(det(precision_chol)) is half of log(det(precision)) because
    # det(precision) == det(precision_chol) ** 2
log_det = _compute_log_det_cholesky(
precisions_chol, covariance_type, n_features)
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'tied':
log_prob = np.empty((n_samples, n_components))
for k, mu in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precisions_chol ** 2
log_prob = (np.sum((means ** 2 * precisions), 1) -
2. * np.dot(X, (means * precisions).T) +
np.dot(X ** 2, precisions.T))
elif covariance_type == 'spherical':
precisions = precisions_chol ** 2
log_prob = (np.sum(means ** 2, 1) * precisions -
2 * np.dot(X, means.T * precisions) +
np.outer(row_norms(X, squared=True), precisions))
return -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
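# --- illustrative check (not part of the original module) ---
# A hypothetical cross-check of the 'full' log-density against
# scipy.stats.multivariate_normal; the two should agree to floating-point
# precision for a single component.
def _demo_log_gaussian_prob():
    from scipy.stats import multivariate_normal
    rng = np.random.RandomState(0)
    X = rng.randn(10, 3)
    mean = np.zeros(3)
    cov = 2.0 * np.eye(3)
    prec_chol = _compute_precision_cholesky(cov[np.newaxis], 'full')
    log_prob = _estimate_log_gaussian_prob(X, mean[np.newaxis],
                                           prec_chol, 'full')
    assert np.allclose(log_prob[:, 0],
                       multivariate_normal(mean, cov).logpdf(X))
    return log_prob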
class GaussianMixture(BaseMixture):
"""Gaussian Mixture.
Representation of a Gaussian mixture model probability distribution.
This class allows to estimate the parameters of a Gaussian mixture
distribution.
Read more in the :ref:`User Guide <gmm>`.
.. versionadded:: 0.18
Parameters
----------
n_components : int, default=1
The number of mixture components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
String describing the type of covariance parameters to use.
Must be one of:
'full'
each component has its own general covariance matrix
'tied'
all components share the same general covariance matrix
'diag'
each component has its own diagonal covariance matrix
'spherical'
each component has its own single variance
tol : float, default=1e-3
The convergence threshold. EM iterations will stop when the
lower bound average gain is below this threshold.
reg_covar : float, default=1e-6
Non-negative regularization added to the diagonal of covariance.
Allows to assure that the covariance matrices are all positive.
max_iter : int, default=100
The number of EM iterations to perform.
n_init : int, default=1
The number of initializations to perform. The best results are kept.
init_params : {'kmeans', 'random'}, default='kmeans'
The method used to initialize the weights, the means and the
precisions.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
weights_init : array-like of shape (n_components, ), default=None
The user-provided initial weights.
        If it is None, weights are initialized using the `init_params` method.
means_init : array-like of shape (n_components, n_features), default=None
        The user-provided initial means.
        If it is None, means are initialized using the `init_params` method.
precisions_init : array-like, default=None
The user-provided initial precisions (inverse of the covariance
matrices).
        If it is None, precisions are initialized using the 'init_params' method.
The shape depends on 'covariance_type'::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
random_state : int, RandomState instance or None, default=None
Controls the random seed given to the method chosen to initialize the
parameters (see `init_params`).
In addition, it controls the generation of random samples from the
fitted distribution (see the method `sample`).
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
In that case, 'n_init' is ignored and only a single initialization
occurs upon the first call.
See :term:`the Glossary <warm_start>`.
verbose : int, default=0
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default=10
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like of shape (n_components,)
The weights of each mixture components.
means_ : array-like of shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
symmetric positive definite so the mixture of Gaussian can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
Gaussian can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
Number of step used by the best fit of EM to reach the convergence.
lower_bound_ : float
Lower bound value on the log-likelihood (of the training data with
respect to the model) of the best fit of EM.
Examples
--------
>>> import numpy as np
>>> from sklearn.mixture import GaussianMixture
>>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
>>> gm = GaussianMixture(n_components=2, random_state=0).fit(X)
>>> gm.means_
array([[10., 2.],
[ 1., 2.]])
>>> gm.predict([[0, 0], [12, 3]])
array([1, 0])
See Also
--------
BayesianGaussianMixture : Gaussian mixture model fit with a variational
inference.
"""
@_deprecate_positional_args
def __init__(self, n_components=1, *, covariance_type='full', tol=1e-3,
reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
weights_init=None, means_init=None, precisions_init=None,
random_state=None, warm_start=False,
verbose=0, verbose_interval=10):
super().__init__(
n_components=n_components, tol=tol, reg_covar=reg_covar,
max_iter=max_iter, n_init=n_init, init_params=init_params,
random_state=random_state, warm_start=warm_start,
verbose=verbose, verbose_interval=verbose_interval)
self.covariance_type = covariance_type
self.weights_init = weights_init
self.means_init = means_init
self.precisions_init = precisions_init
def _check_parameters(self, X):
"""Check the Gaussian mixture parameters are well defined."""
_, n_features = X.shape
if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError("Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% self.covariance_type)
if self.weights_init is not None:
self.weights_init = _check_weights(self.weights_init,
self.n_components)
if self.means_init is not None:
self.means_init = _check_means(self.means_init,
self.n_components, n_features)
if self.precisions_init is not None:
self.precisions_init = _check_precisions(self.precisions_init,
self.covariance_type,
self.n_components,
n_features)
def _initialize(self, X, resp):
"""Initialization of the Gaussian mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
n_samples, _ = X.shape
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type)
weights /= n_samples
self.weights_ = (weights if self.weights_init is None
else self.weights_init)
self.means_ = means if self.means_init is None else self.means_init
if self.precisions_init is None:
self.covariances_ = covariances
self.precisions_cholesky_ = _compute_precision_cholesky(
covariances, self.covariance_type)
elif self.covariance_type == 'full':
self.precisions_cholesky_ = np.array(
[linalg.cholesky(prec_init, lower=True)
for prec_init in self.precisions_init])
elif self.covariance_type == 'tied':
self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
lower=True)
else:
self.precisions_cholesky_ = self.precisions_init
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
            each sample in X.
"""
n_samples, _ = X.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
def _estimate_log_prob(self, X):
return _estimate_log_gaussian_prob(
X, self.means_, self.precisions_cholesky_, self.covariance_type)
def _estimate_log_weights(self):
return np.log(self.weights_)
def _compute_lower_bound(self, _, log_prob_norm):
return log_prob_norm
def _get_parameters(self):
return (self.weights_, self.means_, self.covariances_,
self.precisions_cholesky_)
def _set_parameters(self, params):
(self.weights_, self.means_, self.covariances_,
self.precisions_cholesky_) = params
# Attributes computation
_, n_features = self.means_.shape
if self.covariance_type == 'full':
self.precisions_ = np.empty(self.precisions_cholesky_.shape)
for k, prec_chol in enumerate(self.precisions_cholesky_):
self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
elif self.covariance_type == 'tied':
self.precisions_ = np.dot(self.precisions_cholesky_,
self.precisions_cholesky_.T)
else:
self.precisions_ = self.precisions_cholesky_ ** 2
def _n_parameters(self):
"""Return the number of free parameters in the model."""
_, n_features = self.means_.shape
if self.covariance_type == 'full':
cov_params = self.n_components * n_features * (n_features + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * n_features
elif self.covariance_type == 'tied':
cov_params = n_features * (n_features + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = n_features * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
Returns
-------
bic : float
The lower the better.
"""
return (-2 * self.score(X) * X.shape[0] +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model on the input X.
Parameters
----------
X : array of shape (n_samples, n_dimensions)
Returns
-------
aic : float
The lower the better.
"""
return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
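# --- illustrative usage (not part of the original module) ---
# A short, hypothetical model-selection loop using the class above: fit a
# few component counts on made-up data and keep the model with the lowest BIC.
def _demo_select_n_components():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(200, 2), rng.randn(200, 2) + 5.0])
    models = [GaussianMixture(n_components=k, random_state=0).fit(X)
              for k in (1, 2, 3)]
    return min(models, key=lambda m: m.bic(X))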
| bsd-3-clause |
TheHonestGene/imputor | setup.py | 1 | 1812 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Imputor',
version='0.0.1',
description='A library for imputation',
long_description=long_description,
url='https://github.com/TheHonestGene/imputor',
    author='Bjarni Vilhjalmsson, Uemit Seren',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='Risk Prediction',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=[
"numpy",
"scipy",
"h5py",
"snpy==0.1",
"matplotlib >= 1.4.3"
],
dependency_links=['https://github.com/timeu/snpy/tarball/master#egg=snpy-0.1'],
setup_requires=[
"pytest-runner"
],
tests_require=[
"pytest",
"pytest-cov",
"pytest-pep8",
"coverage"
],
entry_points={
'console_scripts': [
'imputor=imputor:main'
],
},
)
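# --- illustrative usage (not part of the original file) ---
# With this setup.py, a typical development install would look roughly like:
#
#   pip install -e .
#   imputor   # the console script declared in entry_points dispatches to imputor:main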
| mit |
zygmuntz/numer.ai | march/validate_lr.py | 1 | 2651 | #!/usr/bin/env python
"Load data, create the validation split, optionally scale data, train a linear model, evaluate"
"Code updated for march 2016 data"
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer, PolynomialFeatures
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, StandardScaler, RobustScaler
from sklearn.linear_model import LogisticRegression as LR
from sklearn.metrics import roc_auc_score as AUC, accuracy_score as accuracy, log_loss
#
def train_and_evaluate( y_train, x_train, y_val, x_val ):
lr = LR()
lr.fit( x_train, y_train )
p = lr.predict_proba( x_val )
auc = AUC( y_val, p[:,1] )
ll = log_loss( y_val, p[:,1] )
return ( auc, ll )
def transform_train_and_evaluate( transformer ):
global x_train, x_val, y_train
x_train_new = transformer.fit_transform( x_train )
x_val_new = transformer.transform( x_val )
return train_and_evaluate( y_train, x_train_new, y_val, x_val_new )
#
input_file = 'data/numerai_training_data.csv'
d = pd.read_csv( input_file )
train, val = train_test_split( d, test_size = 5000 )
y_train = train.target.values
y_val = val.target.values
x_train = train.drop( 'target', axis = 1 )
x_val = val.drop( 'target', axis = 1 )
# train, predict, evaluate
auc, ll = train_and_evaluate( y_train, x_train, y_val, x_val )
print "No transformation"
print "AUC: {:.2%}, log loss: {:.2%} \n".format( auc, ll )
# try different transformations for X
# X is already scaled to (0,1) so these won't make much difference
transformers = [ MaxAbsScaler(), MinMaxScaler(), RobustScaler(), StandardScaler(),
Normalizer( norm = 'l1' ), Normalizer( norm = 'l2' ), Normalizer( norm = 'max' ) ]
#poly_scaled = Pipeline([ ( 'poly', PolynomialFeatures()), ( 'scaler', MinMaxScaler()) ])
#transformers.append( PolynomialFeatures(), poly_scaled )
for transformer in transformers:
print transformer
auc, ll = transform_train_and_evaluate( transformer )
print "AUC: {:.2%}, log loss: {:.2%} \n".format( auc, ll )
"""
No transformation
AUC: 52.35%, log loss: 69.20%
MaxAbsScaler(copy=True)
AUC: 52.35%, log loss: 69.20%
MinMaxScaler(copy=True, feature_range=(0, 1))
AUC: 52.35%, log loss: 69.20%
RobustScaler(copy=True, with_centering=True, with_scaling=True)
AUC: 52.35%, log loss: 69.20%
StandardScaler(copy=True, with_mean=True, with_std=True)
AUC: 52.35%, log loss: 69.20%
Normalizer(copy=True, norm='l1')
AUC: 51.26%, log loss: 69.26%
Normalizer(copy=True, norm='l2')
AUC: 52.18%, log loss: 69.21%
Normalizer(copy=True, norm='max')
AUC: 52.40%, log loss: 69.19%
""" | bsd-3-clause |
dr-nate/msmbuilder | msmbuilder/preprocessing/__init__.py | 9 | 3185 | # Author: Carlos Xavier Hernandez <[email protected]>
# Contributors:
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
from __future__ import print_function, division, absolute_import
from sklearn import preprocessing
from .base import (MultiSequencePreprocessingMixin,
MultiSequenceOnlinePreprocessingMixin)
from .timeseries import Butterworth, EWMA, DoubleEWMA
__all__ = ['Binarizer', 'Butterworth', 'DoubleEWMA', 'EWMA', 'Imputer',
'KernelCenterer', 'LabelBinarizer', 'MultiLabelBinarizer',
'Normalizer', 'PolynomialFeatures']
class Binarizer(MultiSequencePreprocessingMixin, preprocessing.Binarizer):
__doc__ = preprocessing.Binarizer.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing, 'FunctionTransformer'):
__all__.append('FunctionTransformer')
class FunctionTransformer(MultiSequencePreprocessingMixin,
preprocessing.FunctionTransformer):
__doc__ = preprocessing.FunctionTransformer.__doc__
class Imputer(MultiSequencePreprocessingMixin, preprocessing.Imputer):
__doc__ = preprocessing.Imputer.__doc__
class KernelCenterer(MultiSequencePreprocessingMixin,
preprocessing.KernelCenterer):
__doc__ = preprocessing.KernelCenterer.__doc__
class LabelBinarizer(MultiSequencePreprocessingMixin,
preprocessing.LabelBinarizer):
__doc__ = preprocessing.LabelBinarizer.__doc__
class MultiLabelBinarizer(MultiSequencePreprocessingMixin,
preprocessing.MultiLabelBinarizer):
__doc__ = preprocessing.MultiLabelBinarizer.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing.MinMaxScaler, 'partial_fit'):
__all__.append('MinMaxScaler')
class MinMaxScaler(MultiSequenceOnlinePreprocessingMixin,
preprocessing.MinMaxScaler):
__doc__ = preprocessing.MinMaxScaler.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing, 'MaxAbsScaler'):
__all__.append('MaxAbsScaler')
class MaxAbsScaler(MultiSequenceOnlinePreprocessingMixin,
preprocessing.MaxAbsScaler):
__doc__ = preprocessing.MaxAbsScaler.__doc__
class Normalizer(MultiSequencePreprocessingMixin, preprocessing.Normalizer):
__doc__ = preprocessing.Normalizer.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing, 'RobustScaler'):
__all__.append('RobustScaler')
class RobustScaler(MultiSequencePreprocessingMixin,
preprocessing.RobustScaler):
__doc__ = preprocessing.RobustScaler.__doc__
# Older versions of sklearn might not have this
if hasattr(preprocessing.StandardScaler, 'partial_fit'):
__all__.append('StandardScaler')
class StandardScaler(MultiSequenceOnlinePreprocessingMixin,
preprocessing.StandardScaler):
__doc__ = preprocessing.StandardScaler.__doc__
class PolynomialFeatures(MultiSequencePreprocessingMixin,
preprocessing.PolynomialFeatures):
__doc__ = preprocessing.PolynomialFeatures.__doc__
| lgpl-2.1 |
CharlesGulian/Deconv | coadd_fits_editor.py | 1 | 3366 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 8 21:35:06 2016
@author: charlesgulian
"""
import os
os.chdir('/home/cgulian2/Deconv')
curr_dir = os.getcwd()
import glob
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
#import fits_tools as tools
# ===============================================================================
# Getting image file paths for co-add frames
# Image directory:
image_dir = '/home/DATA/STRIPE82_330-360_AlignCropped/test7'
image_files = glob.glob(os.path.join(image_dir,'*alignCropped.fits'))
image_files.remove('/home/DATA/STRIPE82_330-360_AlignCropped/test7/fpC-4927-x4127-y118_stitched_alignCropped.fits')
#print image_files
all_image_files = os.path.join(os.getcwd(),'imagelist.txt')
with open(all_image_files,'w') as f:
f.write('# Complete list of images \n')
f.write('\n')
for i in image_files:
f.write('{} \n'.format(i))
#print type(image_files)
#print image_files[0:5]
# ===============================================================================
# Getting image file paths for unaligned frames, extracting data from header, rewriting to headers of aligned frames
# Unaligned image directory
og_dir = '/home/DATA/STRIPE82_330-360'
# Search for co-add frame counterparts in unaligned image directory
og_files = []
bad_image_files = []
for image_file in image_files:
image_tag = os.path.split(image_file)[1]
tag = image_tag[0:8]
tag = tag.replace('-','-00')
#print os.path.join(og_dir,tag+'*.fit')
og_file_list = glob.glob(os.path.join(og_dir,tag+'*.fit'))
if len(og_file_list) == 0:
print 'Error: non-aligned image not found'
continue
flux0,flux20,bias,skylevel=None,None,None,None
# Select an image from unaligned image directory with appropriate header information
print 'Selecting corresponding image from unaligned directory'
for i in range(len(og_file_list)):
og_file = og_file_list[i]
og_header = fits.getheader(og_file)
print '\nChecking image {0} header for appropriate keywords'.format(str(i))
print og_file
try:
flux0 = og_header['flux0']
flux20 = og_header['flux20']
bias = og_header['softbias']
skylevel = og_header['sky']
except KeyError:
del(og_header)
continue
break
    if skylevel is None:  # no candidate image provided all required keywords
print 'WARNING: No suitable image found for {0}'.format(image_file)
print "flux0",flux0
print "flux20",flux20
print "bias",bias
print "sky",skylevel
bad_image_files.append(image_file)
continue
header = fits.getheader(image_file)
header['flux0'] = flux0
header['flux20'] = flux20
header['softbias'] = bias
header['sky'] = skylevel
print 'Header data:'
print 'Sky level: {0} | Flux 20: {1}'.format(skylevel, flux20)
new_image_file = image_file.replace('DATA','DATA/charlie')
fits.writeto(new_image_file,fits.getdata(image_file),header,clobber=True)
og_files.append(og_file)
del(og_header)
del(header)
print og_files
print len(og_files)
print len(bad_image_files)
print len(image_files)
missing_keyword_files = os.path.join(os.getcwd(),'missing_keyword_imagelist.txt')
with open(missing_keyword_files,'w') as g:
g.write('# List of images with missing header keywords \n')
g.write('\n')
for i in bad_image_files:
g.write('{} \n'.format(i))
| gpl-3.0 |
dandanvidi/capacity-usage | scripts/model_reactions_protein_weights.py | 3 | 1452 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 13:53:05 2016
@author: dan
"""
import pandas as pd
import re
import numpy as np
from collections import defaultdict
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
model = create_cobra_model_from_sbml_file("../data/iJO1366.xml")
enzyme_genes = pd.DataFrame.from_csv("../data/model_genes.csv")
gr_mol = enzyme_genes["Molecular weight (Da)"].copy()
convert_to_irreversible(model)
complexes = pd.DataFrame.from_csv("../data/enzyme_complexes.csv")
comp = list(complexes["Gene composition"].values)
comp = [dict(zip(re.findall(r"b[0-9]+", s),re.findall(r"\(([0-9]+)\)", s))) for s in comp]
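# The two regexes above assume composition strings roughly of the form "b0001 (2) and b0002 (4)";
# each string becomes a dict mapping locus tags to their copy number in the complex.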
#
#
all_genes = defaultdict(list)
for s in comp:
for k,v in s.iteritems():
all_genes[k].append(float(v))
for b in gr_mol.index:
if b not in all_genes.keys():
all_genes[b].append(1.0)
subunit_comp = {k:np.mean(v) for k,v in all_genes.iteritems()}
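# subunit_comp holds the mean copy number of each gene over all complexes it appears in;
# genes not listed in any complex default to a single copy.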
r_to_weights = {}
for r in model.reactions:
isozymes = r.gene_reaction_rule.split("or")
isozymes = [re.findall(r"b[0-9]+", iso) for iso in isozymes]
weights = []
for genes_list in isozymes:
for b in genes_list:
if b in gr_mol.index:
weights.append(subunit_comp[b]*gr_mol[b])
else:
weights.append(np.nan)
# print r.id, weights
r_to_weights[r.id] = np.mean(weights)
| mit |
GeoMop/GeoMop | src/LayerEditor/bem/side_project_map.py | 1 | 3361 | import numpy as np
import matplotlib.pyplot as plt
class SideProjectMapping:
"""
Class to define mapping through displacement of set of edges.
The mapping is defined by a set of edges 'orig_edges' and set of moved edges 'new_edges'.
    Then a set of points can be mapped using the map_points or map_point methods.
    The mapping has the following properties:
    - points on the original edges are mapped to the corresponding points on the new edges (this does not hold at intersections of edges),
    - if the edges form a decomposition of the plane into (non-convex) polygons, points inside the old polygons are mapped to points inside the new polygons,
    - the mapping is continuous,
    - the mapping is smooth inside polygons.
    The mapping of a point P is a weighted average of its projections onto the individual edges. The weights are 1/r^2, where 'r' is the distance to the projection.
"""
def __init__(self, orig_edges, new_edges):
"""
:param orig_edges: Initial set of edges.
:param new_edges: Set of displaced edges (same number of edges).
"""
assert (len(orig_edges) == len(new_edges))
self.displ = np.array(new_edges) - np.array(orig_edges)
self.edges = [(np.array(a), np.array(b)) for a, b in orig_edges]
def map_points(self, points):
"""
:param points: list of points [(x,y), ...]
:return: list of transformed points
"""
return [self.map_point(np.array(p)) for p in points]
def _weight_displ(self, sides):
displ = np.array([0.0, 0.0])
wsum = 0.0
for w, d in sides:
displ += w * d
wsum += w
return displ / wsum
def map_point(self, p):
"""
:param p: point as numpy array of size 2
:return: transformed point
"""
print("Map p: ", p)
epsilon = 1e-13
sides = []
singular_sides = []
#ax = plt.gca()
#ax.margins(0.5)
for i in range(len(self.edges)):
X1, X2 = self.edges[i]
UX1, UX2 = self.displ[i]
# project to edge
dX = X2 - X1
norm = np.array([-dX[1], dX[0]])
norm = norm / np.linalg.norm(norm)
dist = np.dot(p - X1, norm)
# projection of the point to the element
            t_proj = np.dot(p - X1, dX) / np.dot(dX, dX)  # signed projection parameter, so points behind X1 clamp to t=0
t_proj = max(0.0, t_proj)
t_proj = min(1.0, t_proj)
proj_p = X1 + t_proj * dX
dist = np.linalg.norm( proj_p - p)
print(t_proj, proj_p, dist)
dU = UX2 - UX1
Up = UX1 + t_proj * dU
#ax.quiver(proj_p[0], proj_p[1], Up[0], Up[1], angles='xy', scale_units='xy', scale=1, color='b')
#ax.quiver(X1[0], X1[1], UX1[0], UX1[1], angles='xy', scale_units='xy', scale=1, color='g')
#ax.quiver(X2[0], X2[1], UX2[0], UX2[1], angles='xy', scale_units='xy', scale=1, color='g')
print(t_proj, proj_p, dist, Up)
if (dist < epsilon):
singular_sides.append((1.0, Up))
else:
sides.append((1.0 / dist /dist, Up))
if singular_sides:
displ = self._weight_displ(singular_sides)
else:
displ = self._weight_displ(sides)
print(displ)
return p + displ
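# Minimal usage sketch: a single horizontal edge lifted by 0.5 and a nearby point that
# follows the interpolated displacement. The coordinates are illustrative assumptions,
# not values taken from the surrounding project.
if __name__ == "__main__":
    orig_edges = [((0.0, 0.0), (1.0, 0.0))]
    new_edges = [((0.0, 0.5), (1.0, 0.5))]
    mapping = SideProjectMapping(orig_edges, new_edges)
    moved = mapping.map_point(np.array([0.5, 0.1]))
    print("mapped point:", moved)  # expected to be roughly [0.5, 0.6]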
| gpl-3.0 |
bhzunami/Immo | immo/scikit/pipeline.py | 1 | 46523 | import os
import pdb
import logging
import json
import argparse
import datetime
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import ast
from collections import defaultdict
# Scikit
from sklearn.ensemble import ExtraTreesRegressor, IsolationForest
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.linear_model import LassoLarsCV, Ridge, RidgeCV, LassoCV, Lasso, LinearRegression, LogisticRegression
from sklearn.metrics.scorer import make_scorer
import xgboost as xgb
#import lightgbm as lgb
from sklearn import cross_validation, metrics  # Additional scikit-learn functions
# NLTK
#import nltk
#from nltk.corpus import stopwords # Import the stop word list
#from nltk.stem import SnowballStemmer
from .a_detection import AnomalyDetection
from .helper import generate_matrix, ape, mape, mdape, gen_subplots, plot, train_statistics, feature_importance
from .combined_ensemble import CombinedEnsemble
from .stacked_regressor import StackedRegressor
RNG = np.random.RandomState(42)
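# The MAPE-based scorer below is built with greater_is_better=False, so scikit-learn
# negates the returned value and a smaller MAPE ranks as a better (less negative) score.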
def score_function(y_test, y_pred, **kwargs):
return mape(y_test, y_pred)
scorer = make_scorer(score_function, greater_is_better=False)
class Pipeline():
def __init__(self, goal, settings, directory):
self.goal = goal
self.settings = settings
self.directory = directory
self.image_folder = os.path.abspath(os.path.join(directory, settings['image_folder']))
self.model_folder = os.path.abspath(os.path.join(directory, settings['model_folder']))
# Create folder if they do not exist
if not os.path.exists('{}'.format(self.image_folder)):
logging.info("Directory for images does not exists. Create one")
os.makedirs('{}'.format(self.image_folder))
if not os.path.exists('{}'.format(self.model_folder)):
logging.info("Directory for models does not exists. Create one")
os.makedirs('{}'.format(self.model_folder))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load and Save Methods
def load_csv(self, filename):
def load_csv_inner(ads):
try:
return pd.read_csv(filename, index_col=0, engine='c')
except FileNotFoundError:
logging.info("File {} does not exist. Please run data_analyse.py first".format(filename))
return None
return load_csv_inner
def save_as_df(self, name):
def inner_save_as_df(ads):
joblib.dump(ads, name)
return ads
return inner_save_as_df
def load_df(self, name):
def inner_load_df(ads):
advertisements = joblib.load(name)
X, y = generate_matrix(advertisements, 'price')
self.X, self.y = X.values, y.values
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=RNG)
logging.info("Size of X_train: {}, size of X_test: {}".format(len(self.X_train), len(self.X_test)))
return advertisements
return inner_load_df
def echo(self, message):
def inner_echo(ads):
logging.info("{}".format(message))
return ads
return inner_echo
def remove(self, name):
def inner_remove(ads):
try:
os.remove('{}/{}'.format(self.model_folder, name))
except Exception:
logging.error("Could not remove pkl")
pass
return inner_remove
def load_pipeline(self, pipeline):
filename = "{}/ads_prepared.pkl".format(self.model_folder)
if os.path.isfile(filename):
return [self.load_df(filename)]
else:
return pipeline
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Feature engineering
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def cleanup(self, remove):
def inner_cleanup(ads):
ads = ads.drop(ads[ads['price'] < 10].index)
ads = ads.drop(ads[ads['build_year'] < 1200].index)
ads = ads.drop(ads[ads['build_year'] > 2030].index)
ads = ads.drop(ads[(ads['ogroup'] == 'raum') | (ads['ogroup'] == 'invalid')].index)
ads = ads.drop_duplicates(keep='first')
# Remove empty prices
dropna = ['price', 'build_year', 'num_rooms', 'living_area']
ads = ads.dropna(subset=dropna)
# Remove unwanted cols
return ads.drop(remove, axis=1)
return inner_cleanup
def simple_stats(self, title):
"""Show how many NaN has one Feature and how many we can use
"""
def run(ads):
total_amount_of_data = ads.shape[0]
logging.info("{}".format(title))
logging.info("="*70)
logging.info("We have total {} values".format(total_amount_of_data))
logging.info("{:25} | {:17} | {:8}".format("Feature",
"NaN-Values",
"usable Values"))
logging.info("-"*70)
total_nan = 0
total_use = total_amount_of_data
for key in ads.keys():
if key == 'id' or key == 'Unnamed': # Keys from pandas we do not want
continue
# for i, key in KEYS:
nan_values = ads[key].isnull().sum()
useful_values = total_amount_of_data - nan_values
# Sum up
total_nan += nan_values
total_use = total_use if total_use < useful_values else useful_values
logging.info("{:25} | {:8} ({:5.2f}%) | {:8} ({:3.0f}%)".format(key,
nan_values, (nan_values/total_amount_of_data)*100,
useful_values, (useful_values/total_amount_of_data)*100))
logging.info("-"*70)
logging.info("{:25} | {:17} | {}".format('Total', total_nan, total_use))
return ads
return run
def show_crawler_stats(self, ads):
logging.info(ads.groupby('crawler')['price'].count())
return ads
def transform_noise_level(self, ads):
""" If we have no nose_level at the address
we use the municipality noise_level
"""
def lambdarow(row):
if np.isnan(row.noise_level):
return row.m_noise_level
return row.noise_level
if 'noise_level' in ads.keys():
ads['noise_level'] = ads.apply(lambdarow, axis=1)
return ads
return ads
def replace_zeros_with_nan(self, ads):
""" replace 0 values into np.nan for statistic
"""
ads.loc[ads.living_area == 0, 'living_area'] = np.nan
ads.loc[ads.num_rooms == 0, 'num_rooms'] = np.nan
return ads
def transform_misc_living_area(self, ads):
ads['avg_room_area'] = ads['living_area'] / ads['num_rooms']
return ads
def transform_build_renovation(self, ads):
"""Set was_renovated to 1 if we have a date in renovation_year
"""
ads['was_renovated'] = ads.apply(lambda row: not np.isnan(row['last_renovation_year']), axis=1)
def last_const(row):
current_year = datetime.date.today().year
if row['build_year'] >= current_year or row['last_renovation_year'] >= current_year:
return 0
elif np.isnan(row['last_renovation_year']):
return current_year - row['build_year']
else:
return current_year - row['last_renovation_year']
ads['last_construction'] = ads.apply(last_const, axis=1)
return ads.drop(['last_renovation_year'], axis=1)
def transform_onehot(self, ads):
"""Build one hot encoding for all columns with string as value
"""
logging.debug("Features: {}".format(ads.keys()))
ads = pd.get_dummies(ads, columns=self.settings.get('one_hot_columns'))
return ads
def transform_tags(self, ads):
"""Transform tags
"""
with open(os.path.join(self.directory, '../crawler/taglist.txt')) as f:
search_words = set(["tags_" + x.split(':')[0] for x in f.read().splitlines()])
template_dict = dict.fromkeys(search_words, 0)
def transformer(row):
the_dict = template_dict.copy()
for tag in ast.literal_eval(row.tags):
the_dict["tags_" + tag] = 1
return pd.Series(the_dict)
tag_columns = ads.apply(transformer, axis=1)
return ads.drop(['tags'], axis=1).merge(tag_columns, left_index=True, right_index=True)
def predict_living_area(self, ads):
"""If living area is missing try to predict one
and set the predicted flag to 1
"""
try:
model = joblib.load('{}/living_area.pkl'.format(self.model_folder))
except FileNotFoundError:
logging.error("Could not load living area model. Did you forget to train living area?")
return ads.dropna(subset=['living_area'])
tempdf = ads.drop(['price'], axis=1)
ads.living_area_predicted = 0
nan_idxs = tempdf.living_area.index[tempdf.living_area.apply(np.isnan)]
if len(nan_idxs) > 0:
ads.loc[nan_idxs, 'living_area'] = model.predict(tempdf.drop(['living_area'], axis=1).ix[nan_idxs])
ads.loc[nan_idxs, 'living_area_predicted'] = 1
return ads
def transform_features(self, ads):
"""Transfrom features to more global one
"""
# Merge some Features:
ads['bath'] = np.where((ads['tags_badewanne'] == 1) |
(ads['tags_badezimmer'] == 1) |
(ads['tags_dusche'] == 1) |
(ads['tags_lavabo'] == 1), 1, 0)
ads['interior'] = np.where((ads['tags_anschluss'] == 1) |
(ads['tags_abstellplatz'] == 1) |
(ads['tags_cheminée'] == 1) |
(ads['tags_eingang'] == 1) |
(ads['tags_esszimmer'] == 1) |
(ads['tags_gross'] == 1) |
(ads['tags_heizung'] == 1) |
(ads['tags_lift'] == 1) |
(ads['tags_minergie'] == 1) |
(ads['tags_schlafzimmer'] == 1) |
(ads['tags_wohnzimmer'] == 1) |
(ads['tags_rollstuhlgängig'] == 1) |
(ads['tags_tv'] == 1) |
(ads['tags_küche'] == 1) |
(ads['tags_waschküche'] == 1) |
(ads['tags_waschmaschine'] == 1) |
(ads['tags_wc'] == 1) |
(ads['tags_keller'] == 1) |
(ads['tags_raum'] == 1) |
(ads['tags_zimmer'] == 1), 1, 0)
ads['exterior'] = np.where((ads['tags_aussicht'] == 1) |
(ads['tags_balkon'] == 1) |
(ads['tags_garten'] == 1) |
(ads['tags_garage'] == 1) |
(ads['tags_lage'] == 1) |
(ads['tags_liegenschaft'] == 1) |
(ads['tags_parkplatz'] == 1) |
(ads['tags_sitzplatz'] == 1) |
(ads['tags_terrasse'] == 1), 1, 0)
ads['neighbourhood'] = np.where((ads['tags_autobahnanschluss'] == 1) |
(ads['tags_einkaufen'] == 1) |
(ads['tags_kinderfreundlich'] == 1) |
(ads['tags_kindergarten'] == 1) |
(ads['tags_oberstufe'] == 1) |
(ads['tags_primarschule'] == 1) |
(ads['tags_quartier'] == 1) |
(ads['tags_ruhig'] == 1) |
(ads['tags_sommer'] == 1) |
(ads['tags_verkehr'] == 1) |
(ads['tags_zentral'] == 1), 1, 0)
# Drop the concatenated features
drop_features = ['tags_badewanne', 'tags_badezimmer', 'tags_dusche', 'tags_lavabo', 'tags_anschluss',
'tags_abstellplatz', 'tags_cheminée', 'tags_eingang', 'tags_esszimmer', 'tags_gross',
'tags_heizung', 'tags_lift', 'tags_minergie', 'tags_schlafzimmer', 'tags_wohnzimmer',
'tags_rollstuhlgängig', 'tags_tv', 'tags_küche', 'tags_waschküche', 'tags_waschmaschine',
'tags_wc', 'tags_zimmer', 'tags_aussicht', 'tags_balkon', 'tags_garten', 'tags_garage',
'tags_lage', 'tags_liegenschaft', 'tags_parkplatz', 'tags_sitzplatz', 'tags_terrasse',
'tags_autobahnanschluss', 'tags_einkaufen', 'tags_kinderfreundlich',
'tags_kindergarten', 'tags_oberstufe', 'tags_primarschule', 'tags_quartier',
'tags_ruhig', 'tags_sommer', 'tags_verkehr', 'tags_zentral', 'tags_keller', 'tags_raum']
return ads.drop(drop_features, axis=1)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outlier detection
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def outlier_detection(self, ads):
"""Detect outliers we do not want in our training phase
The outlier model must be trained first
"""
# if os.path.isfile("{}/ads_cleaned.pkl".format(self.model_folder)):
# logging.info("Clean file found skipping outlier detection load data from file.")
# return self.load_df("{}/ads_cleaned.pkl".format(self.model_folder))(ads)
meshgrid = {
'build_year': np.meshgrid(np.linspace(0, max(ads['build_year']), 400),
np.linspace(0, max(ads['price']), 1000)),
'num_rooms': np.meshgrid(np.linspace(-1, max(ads['num_rooms']), 400),
np.linspace(0, max(ads['price']), 1000)),
'living_area': np.meshgrid(np.linspace(0, max(ads['living_area']), 400),
np.linspace(0, max(ads['price']), 1000)),
'last_construction': np.meshgrid(np.linspace(0, max(ads['last_construction']), 400),
np.linspace(0, max(ads['price']), 1000)),
'noise_level': np.meshgrid(np.linspace(0, max(ads['noise_level']), 400),
np.linspace(0, max(ads['price']), 1000))
}
anomaly_detection = AnomalyDetection(ads, self.image_folder, self.model_folder)
ads = anomaly_detection.isolation_forest(self.settings['anomaly_detection'],
meshgrid, self.goal)
return ads
def train_outlier_detection(self, ads):
"""Check which contamination is the best for the features
Run isolation forest with different contamination and check
difference in the standard derivation.
If the diff is < 1 we found our c
"""
for feature in self.settings['anomaly_detection']['features']:
# Do not run outlier detection if model exists
if os.path.isfile('{}/isolation_forest_{}.pkl'.format(self.model_folder, feature)):
logging.info("Outlier detection for feature {} exists.".format(feature))
continue
logging.info("Check feature: {}".format(feature))
# Only use the this specific feature with our target
tmp_ad = ads[[feature, self.goal]]
# Initialize std and std_percent
std = [np.std(tmp_ad[feature].astype(int))]
std_percent = [100]
difference = [0]
# We are in train phase so best_c should always be 0
best_c, self.settings['anomaly_detection'][feature] = 0, 0
last_model, cls_ = None, None
chosen_std_percent = 0
            # Run isolation forest for different contamination values
for c in np.arange(0.01, self.settings['anomaly_detection']['limit'],
self.settings['anomaly_detection']['step']):
last_model, cls_ = cls_ , IsolationForest(max_samples=0.6,
contamination=c,
n_estimators=self.settings['anomaly_detection']['estimator'],
random_state=RNG)
logging.debug("Check C: {}".format(c))
cls_.fit(tmp_ad.values)
outlierIdx = cls_.predict(tmp_ad.values)
# Remove entries which are detected as outliers
filtered = tmp_ad.drop(tmp_ad.index[np.where(outlierIdx == -1)[0]])
                # Calculate the standard deviation of the newly filtered ads
std.append(np.std(filtered[feature].astype(int)))
std_percent.append((std[-1]/std[0])*100)
logging.info("New std: with c {}: {}".format(c, std[-1]))
logging.info("New std in percent with c {}: {}".format(c, std_percent[-1]))
                # Calculate the diff from the last standard deviation to check if we found our contamination
diff = std_percent[-2] - std_percent[-1]
logging.info("Diff when using c {}: {}".format(c, diff))
                # We choose c the first time diff drops below the threshold, so best_c must still be 0 here.
                # We do not stop the loop, though, because we want data for the whole diagram.
if diff < 2.5 and best_c == 0:
# We do not need this c, we need the last c - step
best_c = np.around(c - self.settings['anomaly_detection']['step'], 2)
chosen_std_percent = std_percent[-2]
joblib.dump(last_model, '{}/isolation_forest_{}.pkl'.format(self.model_folder, feature))
difference.append(diff)
# Store best c in our settings to use it later
self.settings['anomaly_detection'][feature] = best_c if best_c > 0 else 0 + self.settings['anomaly_detection']['step']
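            # Note: due to operator precedence the line above assigns best_c when best_c > 0
            # and otherwise falls back to the step size (0 + step).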
# Plot stuff
# Create directory if not exists
if not os.path.exists('{}/outlier_detection'.format(self.image_folder)):
logging.info("Directory for outliers does not exists. Create one")
os.makedirs('{}/outlier_detection'.format(self.image_folder))
logging.info("Best C for feature {}: {} chosen_std: {}".format(feature, best_c, chosen_std_percent))
            # Plot the standard deviation for this feature
fig, ax1 = plt.subplots()
plt.title('Reduction of σ for feature {}'.format(feature.replace("_", " ")))
plt.plot(list(map(lambda x: x*100, np.arange(0.0, self.settings['anomaly_detection']['limit'],
self.settings['anomaly_detection']['step']))),
std_percent, c='r')
plt.ylabel('Decline of σ in %')
plt.xlabel('% removed of {} outliers'.format(feature.replace("_", " ")))
            # Draw lines to the chosen c
plt.plot([best_c*100, best_c*100], [40, chosen_std_percent], linewidth=1, color='b', linestyle='--')
plt.plot([0, best_c*100], [chosen_std_percent, chosen_std_percent], linewidth=1, color='b', linestyle='--')
ax1.set_xlim([0, 10])
ax1.set_ylim([35, 100])
plt.axis('tight')
plt.savefig('{}/outlier_detection/{}_std.png'.format(self.image_folder, feature), dpi=250)
plt.close()
            # Plot the difference between successive standard deviations
fig, ax1 = plt.subplots()
ax1.set_title('Difference of σ from previous σ for feature: {}'.format(feature.replace("_", " ")))
ax1.plot(list(np.arange(0.0, self.settings['anomaly_detection']['limit'],
self.settings['anomaly_detection']['step'])), difference, c='r')
ax1.set_ylabel('% decline of σ to previous σ')
ax1.set_xlabel('% removed of {} outliers'.format(feature.replace("_", " ")))
plt.savefig('{}/outlier_detection/{}_diff_of_std.png'.format(self.image_folder, feature), dpi=250)
plt.close()
# Save best c for all features
with open('{}/settings.json'.format(self.directory), 'w') as f:
f.write(json.dumps(self.settings))
return ads
def predict(self, name):
def inner_predict(ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
idx = 0
model = joblib.load('{}/{}.pkl'.format(self.model_folder, name))
for train_index, test_index in KFold(n_splits=3, shuffle=True).split(X):
logging.info('New split')
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
logging.info("Size of training data: {}".format(len(X_train)))
logging.info("Size of testing data: {}".format(len(X_test)))
y_pred = model.predict(X_test)
train_statistics(y_test, y_pred, title="{}_{}".format(name, idx))
plot(y_test, y_pred, self.image_folder, show=False, title="{}_{}".format(name, idx))
idx += 1
return ads
return inner_predict
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Linear Regression
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def linear_regression(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
for train_index, test_index in KFold(n_splits=3, shuffle=True).split(X):
logging.info('Linear regression new split')
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
logging.info("Size of training data: {}".format(len(X_train)))
logging.info("Size of testing data: {}".format(len(X_test)))
linreg = LinearRegression(normalize=True, n_jobs=-1)
linreg.fit(X_train, y_train)
y_pred = linreg.predict(X_test)
train_statistics(y_test, y_pred)
            plot(y_test, y_pred, self.image_folder, show=False, title="simple_linear_regression")
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Ridge
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_ridge(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=RNG)
ridge = RidgeCV(alphas=[0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
ridge.fit(X_train, y_train)
alpha = ridge.alpha_
logging.info("Try again for more precision with alphas centered around " + str(alpha))
ridge = RidgeCV(alphas=[alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85,
alpha * .9, alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15,
alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4],
cv=5)
ridge.fit(X_train, y_train)
alpha = ridge.alpha_
logging.info("Best alpha: {}".format(alpha))
idx = 0
for train_index, test_index in KFold(n_splits=5, shuffle=True).split(X):
logging.info('New split')
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
ridgereg = Ridge(alpha=alpha, normalize=True)
ridgereg.fit(X_train, y_train)
joblib.dump(ridgereg, '{}/ridge_{}.pkl'.format(self.model_folder, idx))
y_pred = ridgereg.predict(X_test)
train_statistics(y_test, y_pred, title="Ridge_{}".format(idx))
plot(y_test, y_pred, self.image_folder, show=False, title="ridge_regression_{}".format(idx))
idx +=1
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Lasso
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_lasso(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=RNG)
lasso = LassoCV(alphas=[0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30])
lasso.fit(X_train, y_train)
alpha = lasso.alpha_
logging.info("Try again for more precision with alphas centered around " + str(alpha))
lasso = LassoCV(alphas=[alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85,
alpha * .9, alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15,
alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4],
cv=5)
lasso.fit(X_train, y_train)
alpha = lasso.alpha_
logging.info("Best alpha: {}".format(alpha))
idx = 0
for train_index, test_index in KFold(n_splits=5, shuffle=True).split(X):
logging.info('New split')
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
lassoreg = Lasso(alpha=alpha, normalize=True, max_iter=1e5)
lassoreg.fit(X_train, y_train)
joblib.dump(lassoreg, '{}/lasso_{}.pkl'.format(self.model_folder, idx))
y_pred = lassoreg.predict(X_test)
train_statistics(y_test, y_pred, title="lasso_{}".format(idx))
plot(y_test, y_pred, self.image_folder, show=False, title="lasso_regression_{}".format(idx))
idx += 1
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# K nearest Neighbour
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_kneighbours(self, ads):
# X, y = generate_matrix(ads, 'price')
# X, y = X.values, y.values
# parameters = {"n_neighbors": [2, 3, 5], "leaf_size":[50, 100, 200]}
# neigh = KNeighborsRegressor(weights='distance', n_jobs=-1)
# gd = GridSearchCV(neigh, parameters, verbose=1, scoring=scorer, cv=5)
# logging.info("Start Fit")
# gd.fit(X, y)
# logging.info("Best score: {}".format(gd.best_score_))
# logging.info("Best params: {}".format(gd.best_params_))
# params = gd.best_params_
params = {}
self.settings['k-neighbour'] = {}
self.settings['k-neighbour']['n_neighbors'] = params.get('n_neighbors', 2)
self.settings['k-neighbour']['leaf_size'] = params.get('leaf_size', 100)
# Save best c for all features
with open('{}/settings.json'.format(self.directory), 'w') as f:
f.write(json.dumps(self.settings))
neigh = KNeighborsRegressor(weights='distance', n_jobs=-1,
leaf_size=params.get('leaf_size', 100),
n_neighbors=params.get('n_neighbors', 2))
neigh.fit(self.X_train, self.y_train)
joblib.dump(neigh, '{}/kneighbour.pkl'.format(self.model_folder))
y_pred = neigh.predict(self.X_test)
train_statistics(self.y_test, y_pred, title="KNeighbour")
plot(self.y_test, y_pred, self.image_folder, show=False, title="KNeighbour")
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# adaBOOST
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_adaBoost(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
# parameters = {"n_estimators": [17, 18, 19, 20], "learning_rate": [0.01, 0.1, 0.3]}
# adaboost = AdaBoostRegressor(DecisionTreeRegressor(), random_state=RNG)
# gd = GridSearchCV(adaboost, parameters, verbose=1, scoring=scorer, cv=5)
# gd.fit(X, y)
# logging.info("Best score: {}".format(gd.best_score_))
# logging.info("Best params: {}".format(gd.best_params_))
# params = gd.best_params_
params = {}
self.settings['adaboost'] = {}
self.settings['adaboost']['n_estimators'] = params.get('n_estimators', 18)
self.settings['adaboost']['learning_rate'] = params.get('learning_rate', 1)
# Save best c for all features
with open('{}/settings.json'.format(self.directory), 'w') as f:
f.write(json.dumps(self.settings))
boost = AdaBoostRegressor(DecisionTreeRegressor(),
n_estimators=params.get('n_estimators', 18),
learning_rate=params.get('learning_rate', 1),
random_state=RNG)
boost.fit(self.X_train, self.y_train)
joblib.dump(boost, '{}/adaboost.pkl'.format(self.model_folder))
y_pred = boost.predict(self.X_test)
train_statistics(self.y_test, y_pred, title="adaboost")
plot(self.y_test, y_pred, self.image_folder, show=False, title="adaboost")
# X values is numpy matrix with no keys()
X, y = generate_matrix(ads, 'price')
try:
feature_importance(boost, X)
except Exception:
logging.error("Could not get Feature importance")
pass
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Random Forest
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_random_forest(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
# parameters = {"n_estimators": [100, 500, 700, 1000], "min_samples_leaf":[1, 5]}
# random = RandomForestRegressor(n_jobs=-1)
# gd = GridSearchCV(random, parameters, verbose=1, scoring=scorer, cv=5)
# logging.info("Start Fit")
# gd.fit(X, y)
# logging.info("Best score: {}".format(gd.best_score_))
# logging.info("Best score: {}".format(gd.best_params_))
# params = gd.best_params_
params = {}
self.settings['random_forest'] = {}
self.settings['random_forest']['n_estimators'] = params.get('n_estimators', 700)
self.settings['random_forest']['min_samples_leaf'] = params.get('min_samples_leaf', 1)
# Save best c for all features
with open('{}/settings.json'.format(self.directory), 'w') as f:
f.write(json.dumps(self.settings))
model = RandomForestRegressor(n_estimators=params.get('n_estimators', 700),
max_features="auto", n_jobs=-1,
min_samples_leaf=params.get('min_samples_leaf', 1))
model.fit(self.X_train, self.y_train)
joblib.dump(model, '{}/random_forest.pkl'.format(self.model_folder))
y_pred = model.predict(self.X_test)
train_statistics(self.y_test, y_pred, title="random_forest")
plot(self.y_test, y_pred, self.image_folder, show=False, title="random_forest")
# X values is numpy matrix with no keys()
X, y = generate_matrix(ads, 'price')
try:
feature_importance(model, X)
except Exception:
logging.error("Could not get Feature importance")
pass
        return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# XGBoost
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_xgboost(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
# parameters = {"max_depth": [10, 100, 500],
# "learning_rate": [0.01, 0.1, 0.3],
# "n_estimators": [10, 50, 100, 250, 500]}
# xgb_model = xgb.XGBRegressor(silent=False)
# logging.info("Create GridSearch")
# clf = GridSearchCV(xgb_model, parameters, verbose=1, scoring=scorer, cv=5)
# logging.info("Start Fit")
# clf.fit(X_train, y_train)
# logging.info("Best score: {}".format(gd.best_score_))
# logging.info("Best score: {}".format(gd.best_params_))
# params = gd.best_params_
params = {}
self.settings['xgboost'] = {}
self.settings['xgboost']['max_depth'] = params.get('max_depth', 100)
self.settings['xgboost']['learning_rate'] = params.get('learning_rate', 0.1)
self.settings['xgboost']['n_estimators'] = params.get('n_estimators', 350)
# Save best c for all features
with open('{}/settings.json'.format(self.directory), 'w') as f:
f.write(json.dumps(self.settings))
xgb_model = xgb.XGBRegressor(silent=False,
max_depth=params.get('max_depth', 100),
learning_rate=params.get('learning_rate', 0.1),
n_estimators=params.get('n_estimators', 350),
n_jobs=-1)
xgb_model.fit(self.X_train, self.y_train)
xgb_model._Booster.save_model('{}/xgbooster.pkl'.format(self.model_folder))
y_pred = xgb_model.predict(self.X_test)
train_statistics(self.y_test, y_pred, title="xgb")
plot(self.y_test, y_pred, self.image_folder, show=False, title="xgboost")
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Extra Tree
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def train_extra_tree(self, ads):
X, y = generate_matrix(ads, 'price')
X, y = X.values, y.values
# parameters = {"n_estimators": [100, 500, 700, 1000]}
# extra = ExtraTreesRegressor(warm_start=True, n_jobs=-1, random_state=RNG)
# gd = GridSearchCV(extra, parameters, verbose=1, scoring=scorer, cv=5)
# logging.info("Start Fit")
# gd.fit(X, y)
# logging.info("Best score: {}".format(gd.best_score_))
# logging.info("Best score: {}".format(gd.best_params_))
# params = gd.best_params_
params = {}
self.settings['extraTree'] = {}
self.settings['extraTree']['n_estimators'] = params.get('n_estimators', 700)
# Save best c for all features
with open('{}/settings.json'.format(self.directory), 'w') as f:
f.write(json.dumps(self.settings))
extra = ExtraTreesRegressor(n_estimators=params.get('n_estimators', 700),
warm_start=True, n_jobs=-1, random_state=RNG)
extra.fit(self.X_train, self.y_train)
joblib.dump(extra, '{}/extraTree.pkl'.format(self.model_folder))
y_pred = extra.predict(self.X_test)
train_statistics(self.y_test, y_pred, title="ExtraTree_train")
plot(self.y_test, y_pred, self.image_folder, show=False, title="extra")
# X values is numpy matrix with no keys()
X, y = generate_matrix(ads, 'price')
try:
feature_importance(extra, X)
except Exception:
logging.error("Could not get Feature importance")
pass
        return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Stacked model
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def stacked_models(self, ads):
logging.info("Start extra Tree")
params_tree = {}
params_xgb = {}
params_ada = {}
estimator = StackedRegressor(estimators=[
ExtraTreesRegressor(n_estimators=params_tree.get('n_estimators', 700),
warm_start=True, n_jobs=-1, random_state=RNG),
xgb.XGBRegressor(silent=False,
max_depth=params_xgb.get('max_depth', 100),
learning_rate=params_xgb.get('learning_rate', 0.1),
n_estimators=params_xgb.get('n_estimators', 350),
n_jobs=-1),
AdaBoostRegressor(DecisionTreeRegressor(),
n_estimators=params_ada.get('n_estimators', 18),
learning_rate=params_ada.get('learning_rate', 1),
random_state=RNG)
])
logging.info("Start stacked estimator fit")
estimator.fit(self.X_train, self.y_train)
t_predict, xg_predict, a_predict = estimator.predict_(self.X_test)
y_pred = np.array(0.8*a_predict + 0.1*xg_predict + 0.1*t_predict)
train_statistics(self.y_test, y_pred, title="08_01_01")
plot(self.y_test, y_pred, self.image_folder, show=False, title="08_01_01")
y_pred = np.array(0.7*a_predict + 0.2*xg_predict + 0.1*t_predict)
train_statistics(self.y_test, y_pred, title="7_2_1")
plot(self.y_test, y_pred, self.image_folder, show=False, title="7_2_1")
y_pred = np.array(0.6*a_predict + 0.3*xg_predict + 0.1*t_predict)
train_statistics(self.y_test, y_pred, title="6_3_1")
plot(self.y_test, y_pred, self.image_folder, show=False, title="6_3_1")
y_pred = np.array(0.6*a_predict + 0.2*xg_predict + 0.2*t_predict)
train_statistics(self.y_test, y_pred, title="6_2_2")
plot(self.y_test, y_pred, self.image_folder, show=False, title="6_2_2")
y_pred = np.array(0.5*a_predict + 0.3*xg_predict + 0.2*t_predict)
train_statistics(self.y_test, y_pred, title="5_3_2")
plot(self.y_test, y_pred, self.image_folder, show=False, title="5_3_2")
y_pred = np.array(0.5*a_predict + 0.4*xg_predict + 0.1*t_predict)
train_statistics(self.y_test, y_pred, title="5_4_1")
plot(self.y_test, y_pred, self.image_folder, show=False, title="5_4_1")
y_pred = np.array(0.4*a_predict + 0.4*xg_predict + 0.2*t_predict)
train_statistics(self.y_test, y_pred, title="4_4_2")
plot(self.y_test, y_pred, self.image_folder, show=False, title="4_4_2")
y_pred = np.array(0.4*a_predict + 0.3*xg_predict + 0.3*t_predict)
train_statistics(self.y_test, y_pred, title="4_3_3")
plot(self.y_test, y_pred, self.image_folder, show=False, title="4_3_3")
y_pred = np.array(0.7*a_predict + 0.3*xg_predict + 0.0*t_predict)
train_statistics(self.y_test, y_pred, title="7_3_0")
plot(self.y_test, y_pred, self.image_folder, show=False, title="7_3_0")
y_pred = np.array(0.8*a_predict + 0.2*xg_predict + 0.0*t_predict)
train_statistics(self.y_test, y_pred, title="8_2_0")
plot(self.y_test, y_pred, self.image_folder, show=False, title="8_2_0")
estimator.set_weights([0.2, 0.6, 0.2])
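        # The weight order presumably matches the estimators passed to StackedRegressor above
        # (ExtraTrees, XGBoost, AdaBoost); the StackedRegressor implementation lives in stacked_regressor.py.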
joblib.dump(estimator, '{}/stacked.pkl'.format(self.model_folder))
return ads
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Combined
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def combinedEnsemble_settings(self, ads):
self.n_estimators = 700
self.min_samples_leaf = 5
self.ensemble_estimator = 'extratrees'
self.combinedEnsemble_identifier = 'combinedEnsemble_{}_{}_{}'.format(self.ensemble_estimator, self.n_estimators, self.min_samples_leaf)
self.combinedEnsemble_identifier_long = 'combinedEnsemble ensemble_estimator={} n_estimators={}, min_samples_leaf={}'.format(self.ensemble_estimator, self.n_estimators, self.min_samples_leaf)
self.estimators = {
# 'linear': LinearRegression(normalize=True),
# 'ridge': RidgeCV(alphas=[0.01, 0.03, 0.1, 0.3, 1, 3, 10]),
'knn2': KNeighborsRegressor(n_neighbors=2, weights='distance', leaf_size=100),
'knn3': KNeighborsRegressor(n_neighbors=3, weights='distance', leaf_size=100),
# 'knn5': KNeighborsRegressor(n_neighbors=5, weights='distance'),
# 'mean': MeanEstimator(),
# 'rounded': RoundedMeanEstimator(),
}
return ads
def combinedEnsemble_train(self, ads):
model = CombinedEnsemble(
verbose=True,
ensemble_estimator=ExtraTreesRegressor(n_estimators=self.n_estimators, min_samples_leaf=self.min_samples_leaf, n_jobs=-1),
)
logging.info('Fit {}'.format(self.combinedEnsemble_identifier_long))
model.fit(self.X_train, self.y_train)
logging.info("Fit finished. Save model")
joblib.dump(model, '{}/{}.pkl'.format(self.model_folder, self.combinedEnsemble_identifier))
self.combinedEnsemble = model
return ads
def combinedEnsemble_load(self, ads):
self.combinedEnsemble = joblib.load('{}/{}.pkl'.format(self.model_folder, self.combinedEnsemble_identifier))
return ads
def combinedEnsemble_test(self, ads):
model = self.combinedEnsemble
logging.info("Begin testing stage 2 estimators.")
logging.info("-"*80)
logging.info("")
for name, estimator in self.estimators.items():
logging.info('Predict stage 2 estimator: {}'.format(name))
model.estimator2 = estimator
y_pred = model.predict(self.X_test)
logging.info('Statistics for stage 2 estimator: {}'.format(name))
train_statistics(self.y_test, y_pred, title="{} estimator2={}".format(self.combinedEnsemble_identifier_long, name))
plot(self.y_test, y_pred, self.image_folder, show=False, title="{}_{}".format(self.combinedEnsemble_identifier, name))
logging.info("-"*80)
logging.info("")
logging.info('Finished')
return ads
def combinedEnsemble_CV(self, ads):
if 'crawler' in list(ads):
ads = ads.drop(['crawler'], axis=1)
all_y_test = defaultdict(list)
all_y_pred = defaultdict(list)
for train_index, test_index in KFold(n_splits=3, shuffle=True).split(self.X):
logging.info('combinedEnsemble_CV: new split')
            X_train, X_test = self.X[train_index], self.X[test_index]
            y_train, y_test = self.y[train_index], self.y[test_index]
model = CombinedEnsemble(
verbose=True,
ensemble_estimator=ExtraTreesRegressor(n_estimators=self.n_estimators, min_samples_leaf=self.min_samples_leaf, n_jobs=-1),
)
logging.info('combinedEnsemble_CV: fit')
model.fit(X_train, y_train)
for name, estimator in self.estimators.items():
logging.info('combinedEnsemble_CV: predict {}'.format(name))
model.estimator2 = estimator
y_pred = model.predict(X_test)
logging.info('combinedEnsemble_CV: statistics {}'.format(name))
train_statistics(y_test, y_pred, title="CV")
all_y_test[name] += y_test.tolist()
all_y_pred[name] += y_pred
for name, _ in self.estimators.items():
logging.info('combinedEnsemble_CV: combined statistics {}'.format(name))
train_statistics(np.array(all_y_test[name]), np.array(all_y_pred[name]), title="CV combined")
return ads
def cut(self, n_ads):
def cut_inner(ads):
return ads[:n_ads]
return cut_inner
# def lgb(self, ads):
# X, y = generate_matrix(ads, 'price')
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# model = lgb.LGBMRegressor(objective='regression',num_leaves=800,
# learning_rate=0.05, n_estimators=720,
# max_bin = 55, bagging_fraction = 0.8,
# bagging_freq = 5, feature_fraction = 0.2319,
# feature_fraction_seed=9, bagging_seed=9,
# min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)
# model.fit(X_train, y_train)
# joblib.dump(model, '{}/lgb.pkl'.format(self.model_folder))
# y_pred = model.predict(X_test)
# train_statistics(y_test, y_pred, title="lgb")
# plot(y_test, y_pred, self.image_folder, show=True, title="lgb")
# return ads
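# Minimal driver sketch showing how the step methods above can be chained; the settings
# file name and the CSV path below are assumptions for illustration, not the project's
# real entry point.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    directory = os.getcwd()
    with open(os.path.join(directory, 'settings.json')) as f:  # assumed settings file
        settings = json.load(f)
    pipe = Pipeline('price', settings, directory)
    steps = [pipe.load_csv(os.path.join(directory, 'ads.csv')),  # assumed input CSV
             pipe.simple_stats('Raw data'),
             pipe.transform_noise_level,
             pipe.replace_zeros_with_nan]
    ads = None
    for step in steps:
        ads = step(ads)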
| mit |
stormsson/procedural_city_generation_wrapper | vendor/josauder/GUI.py | 2 | 10233 | import matplotlib
matplotlib.use("QT4Agg")
import sys
from PyQt4 import QtGui,QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import UI
import os
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class matplotlibWidget(QtGui.QWidget):
"""
    MUST precede "from window import *"
"""
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.canvas = MplCanvas()
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
from window import *
class GUI(QtGui.QMainWindow):
""" Sets up Graphical User Interface for this Program. Requires PyQt4"""
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
from procedural_city_generation.additional_stuff.IOHelper import StdoutRedirector
redirector=StdoutRedirector(self.ui.console,app)
sys.stdout=redirector
#### 1: ROADMAP ####
UI.setRoadmapGUI(self)
self.ui.roadmap_widget.hide()
self.ui.roadmap_Run.clicked.connect(self.start_roadmap)
self.createTable("roadmap")
self.ui.roadmap_splitter.setSizes([90, 800])
self.ui.roadmap_table.hide()
#### 2: POLYGONS ####
UI.setPolygonsGUI(self)
self.ui.polygons_widget.hide()
self.ui.polygons_Run.clicked.connect(self.start_polygons)
self.createTable("polygons")
self.ui.polygons_splitter.setSizes([90, 800])
#### 3: BUILDING_GENERATION ####
UI.setBuilding_generationGUI(self)
self.ui.building_generation_widget.hide()
self.ui.building_generation_Run.clicked.connect(self.start_building_generation)
self.createTable("building_generation")
self.ui.building_generation_splitter.setSizes([90, 800])
#### 4: VISUALIZATION ####
self.ui.visualization_Run.clicked.connect(UI.visualization)
self.createTable("visualization")
self.ui.visualization_splitter.setSizes([90, 800])
#### 5: ADVANCED ####
self.ui.clean_directories.clicked.connect(self.clean_directories)
sys.stderr=redirector
#TODO Finish method
def saveOptions(self, submodule="roadmap"):
button=getattr(self.ui, submodule+"_save_button")
table=getattr(self.ui, submodule+"_table")
button.hide()
table.hide()
# return saver
def createTable(self, submodule):
""" Creates the Options Table as PyQT4 Objects when called with a submodule. Very messy code, needs to be rewritten.
Parameters
----------
submodule: String, name of submodule
"""
#Initial Pixel Width and Height of Options Table - should be replaced by getWindowSize()-like
h=411
w=891
from procedural_city_generation.additional_stuff.Param import paramsFromJson, jsonFromParams
from procedural_city_generation.additional_stuff.Singleton import Singleton
#Load Parameters from .conf
params=paramsFromJson(os.getcwd()+"/procedural_city_generation/inputs/"+submodule+".conf")
#Add Buttons and assign functions
table=QtGui.QTableWidget(getattr(self.ui, submodule+"_frame"))
save_button=QtGui.QPushButton(getattr(self.ui, submodule+"_frame"), text="Save")
save_button.setGeometry(QtCore.QRect(w-100, h, 100, 31))
save_button.hide()
default_button=QtGui.QPushButton(getattr(self.ui, submodule+"_frame"), text="Reset Defaults")
default_button.setGeometry(QtCore.QRect(w-260, h, 150, 31))
default_button.hide()
table.hide()
#Set Table Geometry, code looks repetitive and should be reworked
table.setGeometry(QtCore.QRect(0, 0, w, h))
table.setColumnCount(6)
table.setHorizontalHeaderItem(0, QtGui.QTableWidgetItem("Parameter Name"))
table.setColumnWidth(0, int(0.2*w))
table.setHorizontalHeaderItem(1, QtGui.QTableWidgetItem("Description"))
table.setColumnWidth(1, int(0.5*w))
table.setHorizontalHeaderItem(2, QtGui.QTableWidgetItem("Default Value"))
table.setColumnWidth(2, int(0.125*w))
table.setHorizontalHeaderItem(3, QtGui.QTableWidgetItem("Value"))
table.setColumnWidth(3, int(0.125*w))
table.setHorizontalHeaderItem(4, QtGui.QTableWidgetItem("min"))
table.setColumnWidth(4, int(0.1*w))
table.setHorizontalHeaderItem(5, QtGui.QTableWidgetItem("max"))
table.setColumnWidth(5, int(0.1*w))
table.setRowCount(len(params))
#Fill out Table with Parameters. Code Looks repetitive, should be reworked
i=0
for parameter in params:
g=QtGui.QTableWidgetItem(str(parameter.name) )
g.setFlags( g.flags() & ~QtCore.Qt.ItemIsEditable )
g.setBackground(QtGui.QBrush(QtGui.QColor(235, 235, 235)))
table.setItem( i, 0 , g)
g=QtGui.QTableWidgetItem(str(parameter.description))
g.setFlags( g.flags() & ~QtCore.Qt.ItemIsEditable)
g.setBackground(QtGui.QBrush(QtGui.QColor(235, 235, 235)))
table.setItem( i, 1 , g)
g=QtGui.QTableWidgetItem(str(parameter.default))
g.setFlags( g.flags() & ~QtCore.Qt.ItemIsEditable)
g.setBackground(QtGui.QBrush(QtGui.QColor(235, 235, 235)))
table.setItem( i, 2 , g)
g=QtGui.QTableWidgetItem(str(parameter.value))
table.setItem( i, 3 , g)
s = "" if parameter.value_lower_bound is None else str(parameter.value_lower_bound)
g=QtGui.QTableWidgetItem(s)
g.setFlags( g.flags() & ~QtCore.Qt.ItemIsEditable)
g.setBackground(QtGui.QBrush(QtGui.QColor(235, 235, 235)))
table.setItem( i, 4 , g)
s = "" if parameter.value_upper_bound is None else str(parameter.value_upper_bound)
g=QtGui.QTableWidgetItem(s)
g.setFlags( g.flags() & ~QtCore.Qt.ItemIsEditable)
g.setBackground(QtGui.QBrush(QtGui.QColor(235, 235, 235)))
table.setItem( i, 5 , g)
i+=1
#Connect functions to buttons
getattr(self.ui, submodule+"_Options").clicked.connect(table.show)
getattr(self.ui, submodule+"_Options").clicked.connect(save_button.show)
getattr(self.ui, submodule+"_Options").clicked.connect(default_button.show)
setattr(self.ui, submodule+"_table", table)
def save_params():
for i, param in enumerate(params):
it=table.item(i, 3).text()
try:
it=eval(str(it))
except:
it=str(it)
param.setValue(it)
Singleton(submodule).kill()
jsonFromParams(os.getcwd()+"/procedural_city_generation/inputs/"+submodule+".conf", params)
print("Save successful")
save_button.hide()
default_button.hide()
table.hide()
print(UI.donemessage)
save_button.clicked.connect(save_params)
setattr(self.ui, submodule+"_save_button", save_button)
def default_params():
for i, param in enumerate(params):
table.item(i, 3).setText(_fromUtf8(str(param.default)))
default_button.clicked.connect(default_params)
setattr(self.ui, submodule+"_default_button", default_button)
def plot(self, x, y, linewidth=1, color="red"):
self.active_widget.canvas.ax.plot(x, y, linewidth=linewidth, color=color)
def clear(self):
self.active_widget.canvas.ax.clear()
def start_roadmap(self):
self.active_widget=self.ui.roadmap_widget
self.active_widget.show()
self.clear()
UI.roadmap()
def start_polygons(self):
self.active_widget=self.ui.polygons_widget
self.active_widget.show()
self.clear()
UI.polygons()
def start_building_generation(self):
self.active_widget=self.ui.building_generation_widget
self.active_widget.show()
self.clear()
UI.building_generation()
def clean_directories(self):
from procedural_city_generation.additional_stuff.clean_tools import clean_pyc_files
print("removing all .pyc files")
clean_pyc_files(os.getcwd())
print("removing all items in /procedural_city_generation/temp/ directory")
os.system("rm -f " +os.getcwd()+"/procedural_city_generation/temp/*")
print("removing all items in /procedural_city_generation/outputs/ directory")
os.system("rm -f " +os.getcwd()+"/procedural_city_generation/outputs/*")
print(UI.donemessage)
def set_xlim(self, tpl):
self.active_widget.canvas.ax.set_xlim(tpl)
def set_ylim(self, tpl):
self.active_widget.canvas.ax.set_ylim(tpl)
def update(self):
self.active_widget.canvas.draw()
global app
app.processEvents()
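# FigureSaver (below) is a small singleton wrapper: the first construction with a
# figure-like object stores it in the inner __FigureSaver, and later attribute access
# on any FigureSaver() is delegated to that shared instance.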
class FigureSaver:
class __FigureSaver:
def __init__(self, fig=None):
self.plot=fig.plot
self.show=fig.show
instance=None
def __init__(self, fig=None):
if not FigureSaver.instance and (fig is not None):
FigureSaver.instance=FigureSaver.__FigureSaver(fig)
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, name, value):
setattr(self.instance, name, value)
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure(frameon=False)
self.ax = self.fig.add_subplot(111)
self.ax.get_yaxis().set_visible(False)
self.ax.get_xaxis().set_visible(False)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
if __name__ == "__main__":
global app
app = QtGui.QApplication(sys.argv)
myapp = GUI()
myapp.show()
app.exec_()
| mpl-2.0 |
ch3ll0v3k/scikit-learn | examples/manifold/plot_lle_digits.py | 181 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
poryfly/scikit-learn | sklearn/tests/test_random_projection.py | 142 | 14033 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries and
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
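# Illustrative values (consistent with the error message checked further below
# in test_too_many_samples_to_find_a_safe_embedding):
# johnson_lindenstrauss_min_dim(1000, 0.1) == 5920, i.e. at least 5920
# components are needed to preserve the pairwise distances of 1000 samples
# within a relative error of eps=0.1.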
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix.
    # Check that the random matrix follows the proper distribution:
    # each element a_{ij} of A is drawn from
    #   a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution:
        # each element a_{ij} of A is drawn from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
OSSHealth/ghdata | workers/contributor_worker/setup.py | 1 | 1396 | #SPDX-License-Identifier: MIT
import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="contributor_worker",
version="1.0.0",
url="https://github.com/chaoss/augur",
license='MIT',
author="Augurlabs",
author_email="[email protected]",
description="Augur Worker that processes and inserts information related to contributors",
packages=find_packages(exclude=('tests',)),
install_requires=[
'Flask==1.1.4',
'Flask-Cors==3.0.10',
'Flask-Login==0.5.0',
'Flask-WTF==0.14.3',
'requests==2.22.0',
'psycopg2-binary==2.8.6',
'click==7.1.2',
'scipy==1.4.1',
'sklearn==0.0'
],
entry_points={
'console_scripts': [
'contributor_worker_start=workers.contributor_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
]
)
| mit |
themrmax/scikit-learn | examples/gaussian_process/plot_compare_gpr_krr.py | 84 | 5205 | """
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes' theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on an ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity (p) of the kernel. Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerably faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerably longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| bsd-3-clause |
alexanderpanchenko/vec2synset | jnt/common.py | 1 | 9157 | # coding=utf8
from __future__ import print_function
import os
import argparse
import numpy as np
import math
import random
from sys import stderr, stdout
import six
import plumbum.cmd as cmd
import pprint
from os.path import splitext, join
import codecs
from patterns import re_escape, re_amp, re_quote_escape
from pandas import read_csv
from itertools import islice
import cPickle as pickle
import os
from os.path import join, abspath, dirname
import jnt
import gzip
from ntpath import basename
""" This namespace contains a set of small common purpose functions and constants. """
UNK_LABEL = "unknown"
TRUE = ['true', '1', 't', 'y', 'yes']
LETTERS = [u'а',u'б',u'в',u'г',u'д',u'е',u'ё',u'ж',u'з',u'и',u'к',u'л',u'м',u'н',u'о',u'п',u'р',u'с',u'т',u'у',u'ф',u'х',u'ц',u'ч',u'ш',u'щ',u'э',u'ь',u'ы',u'ю',u'я',u'a',u'b',u'c',u'd',u'e',u'f',u'g',u'h',u'i',u'j',u'k',u'l',u'm',u'n',u'o',u'p',u'q',u'r',u's',u't',u'u',u'v',u'w',u'x',u'y',u'z']
def fpath2filename(fpath):
""" Returns filename without up to two extensions e.g. /Users/alex/work/joint/src/isas-cc.csv.gz --> isas-cc """
return splitext(splitext(basename(fpath))[0])[0]
def get_data_dir():
return abspath(join(join(dirname(jnt.__file__), os.pardir), "data"))
def dt_scientific2fixed(dt_fpath, output_fpath):
""" Convert similarity from scientific to normal format. """
dt = read_csv(dt_fpath, "\t", encoding='utf8', error_bad_lines=False)
dt = dt.sort(["sim"], ascending=[0])
dt.to_csv(output_fpath, sep="\t", encoding="utf-8", float_format='%.12f', index=False)
def strip_header(input_fpath):
import fileinput
for line in fileinput.input(files=[input_fpath], inplace=True):
if fileinput.isfirstline():
continue
print(line, end="")
def add_header(input_fpath, header):
import fileinput
for line in fileinput.input(files=[input_fpath], inplace=True):
if fileinput.isfirstline():
print(header)
print(line, end="")
def base(fpath):
return base_ext(fpath)[0]
def base_ext(fpath):
components = splitext(fpath)
if len(components) < 2:
return components[0], ""
else:
return components[0], components[1]
def prt(string):
stdout.write("%s\n" % string)
def prt2(tuple2):
stdout.write("%s %s\n" % (tuple2[0], tuple2[1]))
def wc(fpath):
return int(cmd.wc["-l"](fpath).split()[0])
def profiling(function):
import cProfile
import pstats
from cStringIO import StringIO
pr = cProfile.Profile()
pr.enable()
function()
pr.disable()
s = StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
stderr.write(s.getvalue())
def ensure_utf8(text):
""" Make sure that the string is in unicode. """
if six.PY2 and not isinstance(text, six.text_type):
return text.decode('utf-8')
else: return text
def list2str(lst, short=True):
""" Returns a string representing a list """
try:
if short:
return ', '.join(lst)
else:
return str(lst)
except:
if short:
return ""
else:
return "[]"
def str2list(str_list):
""" Parses a string that supposed to contain a list
(or something that has len). Returns a list. """
try:
l = eval(str_list)
if hasattr(l, "__len__"):
return l
else:
print("Warning: cannot parse '%s'. " % str_list, file=stderr)
return []
except:
print("Warning: cannot parse '%s'. " % str_list, file=stderr)
return []
def random_ints():
""" Returns a random integer from 0 to 100,000 """
return str(int(math.floor(random.random() * 100000)))
from patterns import re_newlines
def strip_newlines(input):
return re_newlines.sub(" ", input)
from patterns import re_whitespaces
def normalize_whitespaces(input):
return re_whitespaces.sub(" ", input)
from patterns import re_url
def get_urls(input):
matches = re_url.findall(input)
return matches
def findnth(haystack, needle, n):
    """ Returns the index of the (n+1)-th occurrence of needle in haystack, or -1. """
    parts = haystack.split(needle, n + 1)
if len(parts) <= n + 1:
return -1
return len(haystack) - len(parts[-1]) - len(needle)
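# Illustrative examples (the strings are hypothetical); the occurrence index n
# is zero-based, so n=1 asks for the second occurrence of the needle:
# findnth("a_b_c_d", "_", 1) == 3 (index of the second underscore)
# findnth("a_b_c_d", "_", 5) == -1 (there is no sixth underscore)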
def whatisthis(s):
if isinstance(s, str):
return "str"
elif isinstance(s, unicode):
return "unicode"
else:
return "not str"
def exists(dir_path):
return os.path.isdir(dir_path) or os.path.isfile(dir_path)
def try_remove(fpath):
if exists(fpath):
os.remove(fpath)
def safe_remove(fpath):
try:
os.remove(fpath)
print("File removed:", fpath)
except OSError:
print("Cannot remove file:", fpath)
def ensure_dir(f):
""" Make the directory. """
if not os.path.exists(f): os.makedirs(f)
def chunks(l, n):
""" Yield successive n-sized chunks from l. """
for i in xrange(0, len(l), n):
yield zip(range(i,i+n), l[i:i+n])
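# Illustrative example (the list values are hypothetical): each yielded chunk
# pairs the items with their global indices, e.g.
# list(chunks([10, 20, 30, 40, 50], 2)) ==
#     [[(0, 10), (1, 20)], [(2, 30), (3, 40)], [(4, 50)]]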
def stat(lst, print_stat=True):
sizes_arr = np.array(lst)
s = {}
s["sum"] = np.sum(sizes_arr, axis=0)
s["mean"] = np.mean(sizes_arr, axis=0)
s["std"] = np.std(sizes_arr, axis=0)
s["median"] = np.median(sizes_arr, axis=0)
s["min"] = np.min(sizes_arr, axis=0)
s["max"] = np.max(sizes_arr, axis=0)
if print_stat:
print("number:", s["sum"], file=stderr)
print("mean: %.0f +- %.0f" % (s["mean"], s["std"]), file=stderr)
print("median: %.0f" % s["median"], file=stderr)
print("min: %.0f" % s["min"], file=stderr)
print("max: %.0f" % s["max"], file=stderr)
return s
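# Illustrative example (values rounded): stat([1, 2, 3], print_stat=False)
# returns {"sum": 6, "mean": 2.0, "std": 0.816, "median": 2.0, "min": 1,
# "max": 3}.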
class readable_dir(argparse.Action):
""" Required for argparse parse.add_argument method. """
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
raise argparse.ArgumentTypeError("readable_dir:{0} is not a valid path".format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
setattr(namespace, self.dest, prospective_dir)
else:
raise argparse.ArgumentTypeError("readable_dir:{0} is not a readable dir".format(prospective_dir))
class PrettyPrinterUtf8(pprint.PrettyPrinter):
def format(self, object, context, maxlevels, level):
if isinstance(object, unicode):
return (object.encode('utf8'), True, False)
return pprint.PrettyPrinter.format(self, object, context, maxlevels, level)
def print_line():
print("...............................................................")
def load_voc(voc_fpath, preprocess=True, sep='\t', use_pickle=True, silent=False):
""" Reads vocabulary in the "word" format """
pkl_fpath = voc_fpath + ".pkl"
if use_pickle and exists(pkl_fpath):
voc = pickle.load(open(pkl_fpath, "rb"))
else:
if preprocess:
freq_cln_fpath = voc_fpath + "-cln"
preprocess_pandas_csv(voc_fpath, freq_cln_fpath)
else:
freq_cln_fpath = voc_fpath
word_df = read_csv(freq_cln_fpath, sep, encoding='utf8', error_bad_lines=False)
voc = set(row["word"] for i, row in word_df.iterrows())
print("vocabulary is loaded:", len(voc))
if use_pickle:
pickle.dump(voc, open(pkl_fpath, "wb"))
print("Pickled voc dictionary:", pkl_fpath)
if not silent: print("Loaded %d words from: %s" % (len(voc), pkl_fpath if pkl_fpath else voc_fpath))
return voc
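# Illustrative call (the path below is hypothetical): load a tab-separated
# vocabulary file that has a "word" column, caching the resulting set of
# words in a pickle file next to it on first use.
# voc = load_voc("/path/to/voc.csv", preprocess=True, sep='\t', use_pickle=True)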
def gunzip_file(input_gzipped_fpath, output_gunzipped_fpath):
with codecs.open(output_gunzipped_fpath, "wb") as out:
input_file = gzip.open(input_gzipped_fpath, "rb")
try:
out.write(input_file.read())
finally:
input_file.close()
def preprocess_pandas_csv(input_fpath, output_fpath=""):
prefix, ext = splitext(input_fpath)
if ext == ".gz":
gunzipped_input_fpath = prefix + ".csv"
gunzip_file(input_fpath, gunzipped_input_fpath)
input_fpath = gunzipped_input_fpath
out_fpath = output_fpath if output_fpath != "" else input_fpath + ".tmp"
with codecs.open(input_fpath, "r", "utf-8") as input, codecs.open(out_fpath, "w", "utf-8") as output:
for line in input:
s = line.strip()
s = re_amp.sub(" ", s)
s = re_escape.sub(" ", s)
s = re_quote_escape.sub(" ", s)
print(s, file=output)
if output_fpath == "":
try_remove(input_fpath)
os.rename(out_fpath, input_fpath)
print("cleaned csv:", input_fpath)
else:
print("cleaned csv:", output_fpath)
if ext == ".gz": try_remove(gunzipped_input_fpath)
def lowercase_voc(voc):
""" In case of conflict take the max of two. """
print("....")
vocl = {}
for v in voc:
vl = v.lower()
if vl not in vocl or vocl[vl] < voc[v]:
vocl[vl] = voc[v]
else:
pass
return vocl
def take(n, iterable):
"Return first n items of the iterable as a list"
return list(islice(iterable, n)) | apache-2.0 |
geodynamics/burnman | examples/example_chemical_potentials.py | 2 | 6809 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_chemical_potentials
---------------------------
This example shows how to use the chemical potentials library of functions.
*Demonstrates:*
* How to calculate chemical potentials
* How to compute fugacities and relative fugacities
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
import burnman
import burnman.constants as constants
import burnman.processchemistry as processchemistry
import burnman.chemicalpotentials as chemical_potentials
import burnman.minerals as minerals
assert burnman_path # silence pyflakes warning
if __name__ == "__main__":
'''
Here we initialise the minerals we'll be using
'''
P = 1.e9
T = 1000.
fa = minerals.HP_2011_ds62.fa()
mt = minerals.HP_2011_ds62.mt()
qtz = minerals.HP_2011_ds62.q()
FMQ = [fa, mt, qtz]
for mineral in FMQ:
mineral.set_state(P, T)
'''
Here we find chemical potentials of FeO, SiO2 and O2 for
an assemblage containing fayalite, magnetite and quartz,
and a second assemblage of magnetite and wustite
at 1 GPa, 1000 K
'''
component_formulae = ['FeO', 'SiO2', 'O2']
component_formulae_dict = [processchemistry.dictionarize_formula(f)
for f in component_formulae]
chem_potentials = chemical_potentials.chemical_potentials(FMQ, component_formulae_dict)
oxygen = minerals.HP_2011_fluids.O2()
oxygen.set_state(P, T)
hem = minerals.HP_2011_ds62.hem()
MH = [mt, hem]
for mineral in MH:
mineral.set_state(P, T)
print('log10(fO2) at the FMQ buffer:', np.log10(chemical_potentials.fugacity(oxygen, FMQ)))
print('log10(fO2) at the mt-hem buffer:', np.log10(chemical_potentials.fugacity(oxygen, MH)))
print('Relative log10(fO2):', np.log10(chemical_potentials.relative_fugacity(oxygen, FMQ, MH)))
'''
Here we find the oxygen fugacity of the
FMQ buffer, and compare it to published values.
Fugacity is often defined relative to a material at
some fixed reference pressure (in this case, O2)
Here we use room pressure, 100 kPa
'''
# Set up arrays
temperatures = np.linspace(900., 1420., 100)
log10fO2_FMQ_ONeill1987 = np.empty_like(temperatures)
log10fO2_FMQ = np.empty_like(temperatures)
invT = np.empty_like(temperatures)
# Reference and assemblage pressure
Pr = 1.e5
P = 1.e5
for i, T in enumerate(temperatures):
# Set states
oxygen.set_state(Pr, T)
for mineral in FMQ:
mineral.set_state(P, T)
# The chemical potential and fugacity of O2 at the FMQ buffer
# according to O'Neill, 1987
muO2_FMQ_ONeill1987 = -587474. + 1584.427 * \
T - 203.3164 * T * np.log(T) + 0.092710 * T * T
log10fO2_FMQ_ONeill1987[i] = np.log10(
np.exp((muO2_FMQ_ONeill1987) / (constants.gas_constant * T)))
invT[i] = 10000. / (T)
# The calculated chemical potential and fugacity of O2 at the FMQ
# buffer
log10fO2_FMQ[i] = np.log10(chemical_potentials.fugacity(oxygen, FMQ))
# Plot the FMQ log10(fO2) values
plt.plot(temperatures, log10fO2_FMQ_ONeill1987,
             'k', linewidth=1., label='FMQ (O\'Neill, 1987)')
plt.plot(temperatures, log10fO2_FMQ, 'b--',
linewidth=2., label='FMQ (HP 2011 ds62)')
# Do the same for Re-ReO2
'''
Here we define two minerals, Re (rhenium) and
ReO2 (tugarinovite)
'''
class Re (burnman.Mineral):
def __init__(self):
formula = 'Re1.0'
formula = processchemistry.dictionarize_formula(formula)
self.params = {
'name': 'Re',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': 0.0,
'S_0': 36.53,
'V_0': 8.862e-06,
'Cp': [23.7, 0.005448, 68.0, 0.0],
'a_0': 1.9e-05,
'K_0': 3.6e+11,
'Kprime_0': 4.05,
'Kdprime_0': -1.1e-11,
'n': sum(formula.values()),
'molar_mass': processchemistry.formula_mass(formula)}
burnman.Mineral.__init__(self)
class ReO2 (burnman.Mineral):
def __init__(self):
formula = 'Re1.0O2.0'
formula = processchemistry.dictionarize_formula(formula)
self.params = {
'name': 'ReO2',
'formula': formula,
'equation_of_state': 'hp_tmt',
'H_0': -445140.0,
'S_0': 47.82,
'V_0': 1.8779e-05,
'Cp': [76.89, 0.00993, -1207130.0, -208.0],
'a_0': 4.4e-05,
'K_0': 1.8e+11,
'Kprime_0': 4.05,
'Kdprime_0': -2.25e-11,
'n': sum(formula.values()),
'molar_mass': processchemistry.formula_mass(formula)}
burnman.Mineral.__init__(self)
'''
Here we find the oxygen fugacity of the Re-ReO2
buffer, and again compare it to published values.
'''
# Mineral and assemblage definitions
rhenium = Re()
rheniumIVoxide = ReO2()
ReReO2buffer = [rhenium, rheniumIVoxide]
# Set up arrays
temperatures = np.linspace(850., 1250., 100)
log10fO2_Re_PO1994 = np.empty_like(temperatures)
log10fO2_ReReO2buffer = np.empty_like(temperatures)
for i, T in enumerate(temperatures):
# Set states
oxygen.set_state(Pr, T)
for mineral in ReReO2buffer:
mineral.set_state(P, T)
# The chemical potential and fugacity of O2 at the Re-ReO2 buffer
        # according to Pownceby and O'Neill, 1994
muO2_Re_PO1994 = -451020 + 297.595 * T - 14.6585 * T * np.log(T)
log10fO2_Re_PO1994[i] = np.log10(
np.exp((muO2_Re_PO1994) / (constants.gas_constant * T)))
invT[i] = 10000. / (T)
# The chemical potential and fugacity of O2 at the Re-ReO2 buffer
log10fO2_ReReO2buffer[i] = np.log10(chemical_potentials.fugacity(oxygen, ReReO2buffer))
# Plot the Re-ReO2 log10(fO2) values
plt.plot(temperatures, log10fO2_Re_PO1994, 'k',
             linewidth=1., label='Re-ReO2 (Pownceby and O\'Neill, 1994)')
plt.plot(temperatures, log10fO2_ReReO2buffer,
'r--', linewidth=2., label='Re-ReO2 (HP 2011 ds62)')
plt.ylabel("log_10 (fO2)")
plt.xlabel("T (K)")
plt.legend(loc='lower right')
plt.show()
| gpl-2.0 |
ahnitz/pycbc | pycbc/results/pygrb_plotting_utils.py | 7 | 27260 | # Copyright (C) 2019 Francesco Pannarale, Gino Contestabile
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# =============================================================================
# Preamble
# =============================================================================
"""
Module to generate PyGRB figures: scatter plots and timeseries.
"""
import sys
import os
import logging
import argparse
import copy
import numpy
from pycbc.results import save_fig_with_metadata
# TODO: imports to fix/remove
try:
from glue import segments
from glue.ligolw import utils, lsctables, ligolw, table
except ImportError:
pass
try:
from pylal import MultiInspiralUtils
from pylal.coh_PTF_pyutils import get_bestnr, get_det_response
from pylal.coh_PTF_pyutils import readSegFiles
from pylal.dq import dqSegmentUtils
except ImportError:
pass
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependenant. If this is a problem then remove this and control from
# the executables directly.
import matplotlib
if 'matplotlib.backends' not in sys.modules: # nopep8
matplotlib.use('agg')
from matplotlib import rc
from matplotlib import pyplot as plt
# =============================================================================
# Parse command line
# =============================================================================
def pygrb_plot_opts_parser(usage='', description=None, version=None):
"""Parses options for PyGRB plotting scripts"""
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument("--version", action="version", version=version)
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose output")
parser.add_argument("-t", "--trig-file", action="store",
default=None, required=True,
help="The location of the trigger file")
parser.add_argument("-I", "--inj-file", action="store", default=None,
help="The location of the injection file")
parser.add_argument("-a", "--segment-dir", action="store",
required=True, help="directory holding buffer, on " +
"and off source segment files.")
parser.add_argument("-o", "--output-file", default=None, required=True,
help="Output file.")
parser.add_argument("-O", "--zoomed-output-file", default=None,
required=False, help="Output file for a zoomed in " +
"version of the plot.")
parser.add_argument("-Q", "--chisq-index", action="store", type=float,
default=4.0, help="chisq_index for newSNR calculation")
parser.add_argument("-N", "--chisq-nhigh", action="store", type=float,
default=3.0, help="nhigh for newSNR calculation")
parser.add_argument("-B", "--sngl-snr-threshold", action="store",
type=float, default=4.0, help="Single detector SNR " +
"threshold, the two most sensitive detectors " +
"should have SNR above this")
parser.add_argument("-d", "--snr-threshold", action="store", type=float,
default=6.0, help="SNR threshold for recording " +
"triggers")
parser.add_argument("-c", "--newsnr-threshold", action="store", type=float,
default=None, help="NewSNR threshold for " +
"calculating the chisq of triggers (based on value " +
"of auto and bank chisq values. By default will " +
"take the same value as snr-threshold")
parser.add_argument("-A", "--null-snr-threshold", action="store",
default="4.25,6",
help="comma separated lower,higher null SNR " +
"threshold for null SNR cut")
parser.add_argument("-C", "--null-grad-thresh", action="store", type=float,
default=20., help="Threshold above which to " +
"increase the values of the null SNR cut")
parser.add_argument("-D", "--null-grad-val", action="store", type=float,
default=0.2, help="Rate the null SNR cut will " +
"increase above the threshold")
parser.add_argument("-l", "--veto-directory", action="store", default=None,
help="The location of the CATX veto files")
parser.add_argument("-b", "--veto-category", action="store", type=int,
default=None, help="Apply vetoes up to this level " +
"inclusive")
parser.add_argument("-i", "--ifo", default=None, help="IFO used for IFO " +
"specific plots")
parser.add_argument("--use-sngl-ifo-snr", default=False,
action="store_true", help="Plots are vs single IFO " +
"SNR, rather than coherent SNR")
parser.add_argument("--variable", default=None, help="Quantity to plot " +
"the vertical axis. Supported choices are: " +
"coherent, single, reweighted, or null (for " +
"timeeries plots), standard, bank, or auto (for " +
"chi-square veto plots), coincident, nullstat, " +
"or overwhitened (for null statistics plots)")
parser.add_argument('--plot-title',
help="If given, use this as the plot caption")
parser.add_argument('--plot-caption',
help="If given, use this as the plot caption")
return parser.parse_args()
# =============================================================================
# Format single detector chi-square data as numpy array and floor at 0.005
# =============================================================================
def format_single_chisqs(trig_ifo_cs, ifos):
"""Format single IFO chi-square data as numpy array and floor at 0.005"""
for ifo in ifos:
trig_ifo_cs[ifo] = numpy.asarray(trig_ifo_cs[ifo])
numpy.putmask(trig_ifo_cs[ifo], trig_ifo_cs[ifo] == 0, 0.005)
return trig_ifo_cs
# =============================================================================
# Reset times so that t=0 corresponds to the GRB trigger time
# =============================================================================
def reset_times(seg_dir, trig_data, inj_data, inj_file):
"""Reset times so that t=0 is corresponds to the GRB trigger time"""
segs = readSegFiles(seg_dir)
grb_time = segs['on'][1] - 1
start = int(min(trig_data.time)) - grb_time
end = int(max(trig_data.time)) - grb_time
duration = end-start
start -= duration*0.05
end += duration*0.05
trig_data.time = [t-grb_time for t in trig_data.time]
if inj_file:
inj_data.time = [t-grb_time for t in inj_data.time]
return grb_time, start, end, trig_data, inj_data
# =============================================================================
# Extract trigger/injection data produced by PyGRB
# =============================================================================
class PygrbFilterOutput(object):
"""Extract trigger/injection data produced by PyGRB search"""
def __init__(self, trigs_or_injs, ifos, columns, output_type, opts):
logging.info("Extracting data from the %s just loaded...", output_type)
# Initialize all content of self
self.time = None
self.snr = numpy.array(None)
self.reweighted_snr = None
self.null_snr = None
self.null_stat = None
self.trace_snr = None
self.chi_square = numpy.array(None)
self.bank_veto = None
self.auto_veto = None
self.coinc_snr = None
self.ifo_snr = dict((ifo, None) for ifo in ifos)
self.ifo_bank_cs = dict((ifo, None) for ifo in ifos)
self.ifo_auto_cs = dict((ifo, None) for ifo in ifos)
self.ifo_stan_cs = dict((ifo, None) for ifo in ifos)
self.rel_amp_1 = None
self.norm_3 = None
self.rel_amp_2 = None
self.inclination = None
        # Extract data and fill in content of self
null_thresh = map(float, opts.null_snr_threshold.split(','))
if trigs_or_injs is not None:
# Work out if using sngl chisqs
ifo_att = {'G1': 'g', 'H1': 'h1', 'H2': 'h2', 'L1': 'l', 'V1': 'v',
'T1': 't'}
i = ifo_att[ifos[0]]
self.sngl_chisq = 'chisq_%s' % i in columns
self.sngl_bank_chisq = 'bank_chisq_%s' % i in columns
self.sngl_cont_chisq = 'cont_chisq_%s' % i in columns
# Set basic data
self.time = numpy.asarray(trigs_or_injs.get_end())
self.snr = numpy.asarray(trigs_or_injs.get_column('snr'))
self.reweighted_snr = [get_bestnr(t, q=opts.chisq_index,
n=opts.chisq_nhigh,
null_thresh=null_thresh,
snr_threshold=opts.snr_threshold,
sngl_snr_threshold=opts.sngl_snr_threshold,
chisq_threshold=opts.newsnr_threshold,
null_grad_thresh=opts.null_grad_thresh,
null_grad_val=opts.null_grad_val)
for t in trigs_or_injs]
self.reweighted_snr = numpy.array(self.reweighted_snr)
self.null_snr = numpy.asarray(trigs_or_injs.get_null_snr())
self.null_stat = numpy.asarray(trigs_or_injs.get_column(
'null_statistic'))
self.trace_snr = numpy.asarray(trigs_or_injs.get_column(
'null_stat_degen'))
# Get chisq data
self.chi_square = numpy.asarray(trigs_or_injs.get_column('chisq'))
self.bank_veto = numpy.asarray(trigs_or_injs.get_column(
'bank_chisq'))
self.auto_veto = numpy.asarray(trigs_or_injs.get_column(
'cont_chisq'))
numpy.putmask(self.chi_square, self.chi_square == 0, 0.005)
numpy.putmask(self.bank_veto, self.bank_veto == 0, 0.005)
numpy.putmask(self.auto_veto, self.auto_veto == 0, 0.005)
# Get single detector data
self.coinc_snr = (trigs_or_injs.get_column('coinc_snr'))
self.ifo_snr = dict((ifo, trigs_or_injs.get_sngl_snr(ifo))
for ifo in ifos)
if self.sngl_bank_chisq:
self.ifo_bank_cs = trigs_or_injs.get_sngl_bank_chisqs(ifos)
self.ifo_bank_cs = format_single_chisqs(self.ifo_bank_cs, ifos)
if self.sngl_cont_chisq:
self.ifo_auto_cs = trigs_or_injs.get_sngl_cont_chisqs(ifos)
self.ifo_auto_cs = format_single_chisqs(self.ifo_auto_cs, ifos)
if self.sngl_chisq:
self.ifo_stan_cs = trigs_or_injs.get_sngl_chisqs(ifos)
self.ifo_stan_cs = format_single_chisqs(self.ifo_stan_cs, ifos)
# Initiate amplitude generator
num_amp = 4
amplitudes = range(1, num_amp+1)
# Get amplitude terms
amp = dict((amplitude,
numpy.asarray(trigs_or_injs.get_column(
'amp_term_%d' % amplitude)))
for amplitude in amplitudes)
#
# All 0, hence the 3 warnings
# for i in amplitudes:
# print numpy.count_nonzero(amp[amplitudes])
#
self.rel_amp_1 = numpy.sqrt((amp[1]**2 + amp[2]**2) /
(amp[3]**2 + amp[4]**2))
gamma_r = amp[1] - amp[4]
gamma_i = amp[2] + amp[3]
delta_r = amp[1] + amp[4]
delta_i = amp[3] - amp[2]
norm_1 = delta_r*delta_r + delta_i*delta_i
norm_2 = gamma_r*gamma_r + gamma_i*gamma_i
self.norm_3 = ((norm_1**0.25) + (norm_2**0.25))**2
amp_plus = (norm_1)**0.5 + (norm_2)**0.5
amp_cross = abs((norm_1)**0.5 - (norm_2)**0.5)
self.rel_amp_2 = amp_plus/amp_cross
self.inclination = amp_cross/self.norm_3
num_trigs_or_injs = len(trigs_or_injs)
if num_trigs_or_injs < 1:
logging.warning("No %s found.", output_type)
elif num_trigs_or_injs >= 1:
logging.info("%d %s found.", num_trigs_or_injs, output_type)
# Deal with the sigma-squares (historically called sigmas here)
if output_type == "triggers":
sigma = trigs_or_injs.get_sigmasqs()
self.sigma_tot = numpy.zeros(num_trigs_or_injs)
# Get antenna response based parameters
self.longitude = numpy.degrees(trigs_or_injs.get_column('ra'))
self.latitude = numpy.degrees(trigs_or_injs.get_column('dec'))
self.f_resp = dict((ifo, numpy.empty(num_trigs_or_injs))
for ifo in ifos)
for i in range(num_trigs_or_injs):
# Calculate f_resp for each IFO if we haven't done so yet
f_plus, f_cross = get_det_response(self.longitude[i],
self.latitude[i],
self.time[i])
for ifo in ifos:
self.f_resp[ifo][i] = sum(numpy.array([f_plus[ifo],
f_cross[ifo]]
)**2)
self.sigma_tot[i] += (sigma[ifo][i] *
self.f_resp[ifo][i])
for ifo in ifos:
self.f_resp[ifo] = self.f_resp[ifo].mean()
# Normalise trig_sigma
self.sigma_tot = numpy.array(self.sigma_tot)
for ifo in ifos:
sigma[ifo] = numpy.asarray(sigma[ifo]) / self.sigma_tot
self.sigma_mean = {}
self.sigma_max = {}
self.sigma_min = {}
for ifo in ifos:
try:
self.sigma_mean[ifo] = sigma[ifo].mean()
self.sigma_max[ifo] = sigma[ifo].max()
self.sigma_min[ifo] = sigma[ifo].min()
except ValueError:
self.sigma_mean[ifo] = 0
self.sigma_max[ifo] = 0
self.sigma_min[ifo] = 0
logging.info("%s parameters extracted", output_type)
# =============================================================================
# Function to open trigger and injection xml files
# =============================================================================
def load_xml_file(filename):
"""Wrapper to ligolw's utils.load_filename"""
xml_doc = utils.load_filename(filename, gz=filename.endswith("gz"),
contenthandler=lsctables.use_in(
ligolw.LIGOLWContentHandler))
return xml_doc
# =============================================================================
# Function to extract ifos
# =============================================================================
def extract_ifos(trig_file):
"""Extracts IFOs from search summary table"""
# Load search summary
xml_doc = load_xml_file(trig_file)
search_summ = table.get_table(xml_doc,
lsctables.SearchSummaryTable.tableName)
# Extract IFOs
ifos = sorted(map(str, search_summ[0].get_ifos()))
return ifos
# =============================================================================
# Function to extract vetoes
# =============================================================================
def extract_vetoes(veto_files, ifos):
"""Extracts vetoes from veto filelist"""
    # Initialize veto containers
vetoes = segments.segmentlistdict()
for ifo in ifos:
vetoes[ifo] = segments.segmentlist()
# Construct veto list from veto filelist
if veto_files:
for file in veto_files:
ifo = os.path.basename(file)[:2]
if ifo in ifos:
# This returns a coalesced list of the vetoes
tmp_veto_segs = dqSegmentUtils.fromsegmentxml(open(file, 'r'))
for entry in tmp_veto_segs:
vetoes[ifo].append(entry)
for ifo in ifos:
vetoes[ifo].coalesce()
return vetoes
# =============================================================================
# Function to load triggers
# =============================================================================
def load_triggers(trig_file, vetoes, ifos):
""""Loads triggers from PyGRB output file"""
logging.info("Loading triggers...")
# Extract time-slides
multis, slide_dict, _ = \
MultiInspiralUtils.ReadMultiInspiralTimeSlidesFromFiles([trig_file])
num_slides = len(slide_dict)
lsctables.MultiInspiralTable.loadcolumns =\
[slot for slot in multis[0].__slots__ if hasattr(multis[0], slot)]
# Extract triggers
trigs = lsctables.New(lsctables.MultiInspiralTable,
columns=lsctables.MultiInspiralTable.loadcolumns)
logging.info("%d triggers found.", len(trigs))
# Time-slid vetoes
for slide_id in range(num_slides):
slid_vetoes = copy.deepcopy(vetoes)
for ifo in ifos:
slid_vetoes[ifo].shift(-slide_dict[slide_id][ifo])
# Add time-slid triggers
vets = slid_vetoes.union(slid_vetoes.keys())
trigs.extend(t for t in multis.veto(vets)
if int(t.time_slide_id) == slide_id)
logging.info("%d triggers found when including timeslides.", len(trigs))
return trigs
# =============================================================================
# Function to load injections
# =============================================================================
def load_injections(inj_file, vetoes):
""""Loads injections from PyGRB output file"""
logging.info("Loading injections...")
# Load injection file
xml_doc = load_xml_file(inj_file)
multis = table.get_table(xml_doc, lsctables.MultiInspiralTable.tableName)
# Extract injections
injs = lsctables.New(lsctables.MultiInspiralTable,
columns=lsctables.MultiInspiralTable.loadcolumns)
# Injections in time-slid non-vetoed data
injs.extend(t for t in multis if t.get_end() not in vetoes)
logging.info("%d injections found.", len(injs))
return injs
# =============================================================================
# Function to calculate the chi-square value that reweights SNR into new SNR
# =============================================================================
def new_snr_chisq(snr, new_snr, chisq_dof, chisq_index=4.0, chisq_nhigh=3.0):
"""Returns the chi-square value needed to weight SNR into new SNR"""
chisqnorm = (snr/new_snr)**chisq_index
if chisqnorm <= 1:
return 1E-20
return chisq_dof * (2*chisqnorm - 1)**(chisq_nhigh/chisq_index)
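# Illustrative example (the numbers are hypothetical, not taken from any
# search configuration): with the default chisq_index=4.0 and chisq_nhigh=3.0,
# a trigger with SNR 10 is reweighted down to a new SNR of 8 once its
# chi-square (for chisq_dof=40) exceeds roughly
#   new_snr_chisq(10., 8., 40) == 40 * (2 * (10. / 8.)**4 - 1)**(3. / 4.) ~ 111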
# =============================================================================
# Given the trigger and injection values of a quantity, determine the maximum
# =============================================================================
def axis_max_value(trig_values, inj_values, inj_file):
"""Deterime the maximum of a quantity in the trigger and injection data"""
axis_max = trig_values.max()
if inj_file and inj_values.size and inj_values.max() > axis_max:
axis_max = inj_values.max()
return axis_max
# =============================================================================
# Calculate all chi-square contours for diagnostic plots
# =============================================================================
def calculate_contours(trigs, opts, new_snrs=None):
"""Generate the plot contours for chisq variable plots"""
if new_snrs is None:
new_snrs = [5.5, 6, 6.5, 7, 8, 9, 10, 11]
chisq_index = opts.chisq_index
chisq_nhigh = opts.chisq_nhigh
new_snr_thresh = opts.newsnr_threshold
null_thresh = []
for val in map(float, opts.null_snr_threshold.split(',')):
null_thresh.append(val)
null_thresh = null_thresh[-1]
null_grad_snr = opts.null_grad_thresh
null_grad_val = opts.null_grad_val
chisq_dof = trigs[0].chisq_dof
bank_chisq_dof = trigs[0].bank_chisq_dof
cont_chisq_dof = trigs[0].cont_chisq_dof
# Add the new SNR threshold contour to the list if necessary
# and keep track of where it is
cont_value = None
try:
cont_value = new_snrs.index(new_snr_thresh)
except ValueError:
new_snrs.append(new_snr_thresh)
cont_value = -1
# Initialise chisq contour values and colours
colors = ["k-" if snr == new_snr_thresh else
"y-" if snr == int(snr) else
"y--" for snr in new_snrs]
# Get SNR values for contours
snr_low_vals = numpy.arange(4, 30, 0.1)
snr_high_vals = numpy.arange(30, 500, 1)
snr_vals = numpy.asarray(list(snr_low_vals) + list(snr_high_vals))
# Initialise contours
bank_conts = numpy.zeros([len(new_snrs), len(snr_vals)],
dtype=numpy.float64)
auto_conts = numpy.zeros([len(new_snrs), len(snr_vals)],
dtype=numpy.float64)
chi_conts = numpy.zeros([len(new_snrs), len(snr_vals)],
dtype=numpy.float64)
null_cont = []
# Loop over each and calculate chisq variable needed for SNR contour
for j, snr in enumerate(snr_vals):
for i, new_snr in enumerate(new_snrs):
bank_conts[i][j] = new_snr_chisq(snr, new_snr, bank_chisq_dof,
chisq_index, chisq_nhigh)
auto_conts[i][j] = new_snr_chisq(snr, new_snr, cont_chisq_dof,
chisq_index, chisq_nhigh)
chi_conts[i][j] = new_snr_chisq(snr, new_snr, chisq_dof,
chisq_index, chisq_nhigh)
if snr > null_grad_snr:
null_cont.append(null_thresh + (snr-null_grad_snr)*null_grad_val)
else:
null_cont.append(null_thresh)
null_cont = numpy.asarray(null_cont)
return bank_conts, auto_conts, chi_conts, null_cont, snr_vals, \
cont_value, colors
# =============================================================================
# Plot contours in a scatter plot where SNR is on the horizontal axis
# =============================================================================
def contour_plotter(axis, snr_vals, contours, colors, vert_spike=False):
"""Plot contours in a scatter plot where SNR is on the horizontal axis"""
for i, _ in enumerate(contours):
plot_vals_x = []
plot_vals_y = []
if vert_spike:
for j, _ in enumerate(snr_vals):
# Workaround to ensure vertical spike is shown on veto plots
if contours[i][j] > 1E-15 and not plot_vals_x:
plot_vals_x.append(snr_vals[j])
plot_vals_y.append(0.1)
if contours[i][j] > 1E-15 and plot_vals_x:
plot_vals_x.append(snr_vals[j])
plot_vals_y.append(contours[i][j])
else:
plot_vals_x = snr_vals
plot_vals_y = contours[i]
axis.plot(plot_vals_x, plot_vals_y, colors[i])
# =============================================================================
# Contains plotting setups shared by PyGRB plots
# =============================================================================
def pygrb_shared_plot_setups():
"""Master function to plot PyGRB results"""
# Get rcParams
rc('font', size=14)
# Set color for out-of-range values
plt.cm.spring.set_over('g')
# =============================================================================
# Master plotting function: fits all plotting needs in for PyGRB results
# =============================================================================
def pygrb_plotter(trig_x, trig_y, inj_x, inj_y, inj_file, xlabel, ylabel,
fig_path, snr_vals=None, conts=None,
shade_cont_value=None, colors=None, vert_spike=False,
xlims=None, ylims=None, use_logs=True,
cmd=None, plot_title=None, plot_caption=None):
"""Master function to plot PyGRB results"""
fig_name = os.path.split(os.path.abspath(fig_path))[1]
logging.info(" * %s (%s vs %s)...", fig_name, xlabel, ylabel)
# Set up plot
fig = plt.figure()
cax = fig.gca()
# Plot trigger-related quantities
if use_logs:
cax.loglog(trig_x, trig_y, 'bx')
else:
cax.plot(trig_x, trig_y, 'bx')
cax.grid()
# Plot injection-related quantities
if inj_file:
if use_logs:
cax.loglog(inj_x, inj_y, 'r+')
else:
cax.plot(inj_x, inj_y, 'r+')
# Plot contours
if conts is not None:
contour_plotter(cax, snr_vals, conts, colors, vert_spike=vert_spike)
# Add shading above a specific contour (typically used for vetoed area)
if shade_cont_value is not None:
limy = cax.get_ylim()[1]
polyx = copy.deepcopy(snr_vals)
polyy = copy.deepcopy(conts[shade_cont_value])
polyx = numpy.append(polyx, [max(snr_vals), min(snr_vals)])
polyy = numpy.append(polyy, [limy, limy])
cax.fill(polyx, polyy, color='#dddddd')
# Axes: labels and limits
cax.set_xlabel(xlabel)
cax.set_ylabel(ylabel)
if xlims:
cax.set_xlim(xlims)
if ylims:
cax.set_ylim(ylims)
# Wrap up
plt.tight_layout()
save_fig_with_metadata(fig, fig_path, cmd=cmd, title=plot_title,
caption=plot_caption)
# fig_kwds=fig_kwds,
plt.close()
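# Illustrative call (the trig_data/inj_data/opts names are hypothetical and
# would come from PygrbFilterOutput and pygrb_plot_opts_parser above):
# pygrb_plotter(trig_data.snr, trig_data.reweighted_snr,
#               inj_data.snr, inj_data.reweighted_snr, opts.inj_file,
#               "Coherent SNR", "Reweighted SNR", opts.output_file,
#               use_logs=True, cmd=' '.join(sys.argv),
#               plot_title=opts.plot_title, plot_caption=opts.plot_caption)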
| gpl-3.0 |
PyWiFeS/tools | runscript.py | 1 | 1264 | import process_stellar
import matplotlib.pyplot as plt
conv_tlusty_spect = process_stellar.conv_tlusty_spect
conv_phoenix_spect = process_stellar.conv_phoenix_spect
rv_process_dir = process_stellar.rv_process_dir
import pdb
import time
import numpy as np
##CONVOLVING TEMPLATES
#conv_tlusty_spect('/Volumes/UTRAID/TLUSTY/BGvispec_v2/','tlusty_conv')
#conv_phoenix_spect('/Volumes/UTRAID/phoenix_hires/PHOENIX-ACES-AGSS-COND-2011/Z-0.0/foruse/','phoenix_conv')
##RUNNING RV FITTER ON DATA
##Executing from the code directory:
#rv_process_dir('/Volumes/UTRAID/wifes_data/140619/reduction_red_150806',
#template_conv_dir='./phoenix_conv/',outdir='arizz_outputs/140619/phoenix',mask_ha_emission=True)
#rv_process_dir('/Users/arizz/python/pywifes/tools/test_intput',
#template_conv_dir='/Users/arizz/python/pywifes/tools/full_conv/',outdir='/Users/arizz/python/pywifes/tools/testing_outputs',mask_ha_emission=False)
indirs = np.array(['140623','140622','140621','140619'])
for ii in indirs:
indir= '/Volumes/UTRAID/wifes_data/'+ii+'/reduction_red_150806'
odir = 'arizz_outputs/'+indir.split('/')[4]+'/both'
#pdb.set_trace()
rv_process_dir(indir,template_conv_dir='/Users/arizz/python/pywifes/tools/full_conv/',outdir=odir,mask_ha_emission=False)
| mit |
e-mission/e-mission-server | emission/analysis/modelling/tour_model/cluster_groundtruth.py | 2 | 5021 | from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
# standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import *
from past.utils import old_div
import logging
from sklearn.metrics.cluster import homogeneity_score, completeness_score
import numpy
import matplotlib.pyplot as plt
# our imports
import emission.analysis.modelling.tour_model.cluster_pipeline as cp
import emission.analysis.modelling.tour_model.similarity as similarity
"""
Functions to evaluate clustering based on groundtruth. To use these functions,
an array of the length of the data must be passed in, with different values in the
array indicating different groundtruth clusters.
These functions can be used alongside the cluster pipeline to evaluate clustering.
An example of how to run this with the cluster pipeline is in the main method. To run it,
pass in a list of groundtruth.
Note that the cluster pipeline works with trips, not sections, so to use the above
code the groundtruth has to also be by trips.
"""
#turns color array into an array of integers
def get_colors(data, colors):
if len(data) != len(colors):
raise ValueError('Data and groundtruth must have the same number of elements')
    indices = []
for n in colors:
if n not in indices:
indices.append(n)
for i in range(len(colors)):
colors[i] = indices.index(colors[i])
return colors
#update the ground truth after binning
def update_colors(bins, colors):
newcolors = []
for bin in bins:
for b in bin:
newcolors.append(colors[b])
    indices = []
for n in newcolors:
if n not in indices:
indices.append(n)
for i in range(len(newcolors)):
newcolors[i] = indices.index(newcolors[i])
return newcolors
#evaluates the cluster labels against the groundtruth colors
def evaluate(colors, labels):
b = homogeneity_score(colors, labels)
c = completeness_score(colors, labels)
    logging.debug('homogeneity is %.3f' % b)
    logging.debug('completeness is %.3f' % c)
#maps the clusters, colored by the groundtruth
#creates a map for each groundtruthed cluster and
#a map showing all the clusters.
def map_clusters_by_groundtruth(data, labels, colors, map_individuals=False):
    from matplotlib import colors as matcol
    import pygmaps  # required for the HTML map output below
    colormap = plt.cm.get_cmap()
    import random
r = random.sample(list(range(len(set(labels)))), len(set(labels)))
rand = []
clusters = len(set(labels))
for i in range(len(labels)):
rand.append(old_div(r[labels[i]],float(clusters)))
if map_individuals:
for color in set(colors):
first = True
num_paths = 0
for i in range(len(colors)):
if colors[i] == color:
num_paths += 1
start_lat = data[i].trip_start_location.lat
start_lon = data[i].trip_start_location.lon
end_lat = data[i].trip_end_location.lat
end_lon = data[i].trip_end_location.lon
                    if first:
                        mymap = pygmaps.maps(start_lat, start_lon, 10)
                        first = False
path = [(start_lat, start_lon), (end_lat, end_lon)]
mymap.addpath(path, matcol.rgb2hex(colormap(rand[i])))
mymap.draw('./mycluster' + str(color) + '.html')
    # Overview map of all trips (centered on the Bay Area).
    mymap = pygmaps.maps(37.5, -122.32, 10)
for i in range(len(data)):
start_lat = data[i].trip_start_location.lat
start_lon = data[i].trip_start_location.lon
end_lat = data[i].trip_end_location.lat
end_lon = data[i].trip_end_location.lon
path = [(start_lat, start_lon), (end_lat, end_lon)]
mymap.addpath(path, matcol.rgb2hex(colormap(old_div(float(colors[i]),len(set(colors))))))
mymap.draw('./mymap.html')
def main(colors):
data = cp.read_data() #get the data
colors = get_colors(data, colors) #make colors the right format
data, bins = cp.remove_noise(data, .5, 300) #remove noise from data
###### the next few lines are to evaluate the binning
sim = similarity.similarity(data, .5, 300) #create a similarity object
sim.bins = bins #set the bins, since we calculated them above
sim.evaluate_bins() #evaluate them to create the labels
######
colors = update_colors(bins, colors) #update the colors to reflect deleted bins
labels = sim.labels #get labels
evaluate(numpy.array(colors), numpy.array(labels)) #evaluate the bins
clusters, labels, data = cp.cluster(data, len(bins)) #cluster
evaluate(numpy.array(colors), numpy.array(labels)) #evaluate clustering
map_clusters_by_groundtruth(data, labels, colors, map_individuals=False) #map clusters, make last parameter true to map individual clusters
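# Hypothetical entry point (placeholder name; supply one groundtruth label per
# trip returned by cp.read_data()):
#   if __name__ == "__main__":
#       main(list_of_groundtruth_labels)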
| bsd-3-clause |
tigerneil/MLSS | classification/util.py | 2 | 8466 | import csv
import numpy as np
import scipy.linalg
import matplotlib.pyplot as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn import svm
from scipy.stats import multivariate_normal
from scipy.stats import bernoulli
print("version",1)
class GaussianMixture:
def __init__(self,mean0,cov0,mean1,cov1):
""" construct a mixture of two gaussians. mean0 is 2x1 vector of means for class 0, cov0 is 2x2 covariance matrix for class 0.
Similarly for class 1"""
self.mean0 = mean0
self.mean1 = mean1
self.cov0 = cov0
self.cov1 = cov1
self.rv0 = multivariate_normal(mean0, cov0)
self.rv1 = multivariate_normal(mean1, cov1)
def plot(self,data=None):
x1 = np.linspace(-4,4,100)
x2 = np.linspace(-4,4,100)
X1,X2 = np.meshgrid(x1,x2)
pos = np.empty(X1.shape+(2,))
pos[:,:,0] = X1
pos[:,:,1]= X2
        # Likelihood ratio p(x|y=1)/p(x|y=0); assuming equal class priors,
        # the posterior P(y=1|x) equals a/(1+a), plotted below as z.
        a = self.rv1.pdf(pos)/self.rv0.pdf(pos)
if data:
nplots = 4
else:
nplots = 3
fig,ax = pl.subplots(1,nplots,figsize = (5*nplots,5))
[ax[i].spines['left'].set_position('zero') for i in range(0,nplots)]
[ax[i].spines['right'].set_color('none') for i in range(0,nplots)]
[ax[i].spines['bottom'].set_position('zero') for i in range(0,nplots)]
[ax[i].spines['top'].set_color('none') for i in range(0,nplots)]
        ax[0].set_title("p(x1,x2|y = 1)")
        ax[1].set_title("p(x1,x2|y = 0)")
        ax[2].set_title("P(y = 1|x1,x2)")
[ax[i].set_xlim([-4,4]) for i in range(0,3)]
[ax[i].set_ylim([-4,4]) for i in range(0,3)]
cn = ax[0].contourf(x1,x2,self.rv1.pdf(pos))
cn2 = ax[1].contourf(x1,x2,self.rv0.pdf(pos))
z = a/(1.0+a)
cn3 = ax[2].contourf(x1,x2,z)
ct = ax[2].contour(cn3,levels=[0.5])
ax[2].clabel(ct)
if data:
X,Y = data
colors = ["blue" if target < 1 else "red" for target in Y]
x = X[:,0]
y = X[:,1]
yis1 = np.where(Y==1)[0]
yis0 = np.where(Y!=1)[0]
ax[3].set_title("Samples colored by class")
ax[3].scatter(x,y,s=30,c=colors,alpha=.5)
            ax[0].scatter(x[yis1],y[yis1],s=5,c=np.array(colors)[yis1],alpha=.3)
            ax[1].scatter(x[yis0],y[yis0],s=5,c=np.array(colors)[yis0],alpha=.3)
ax[2].scatter(x,y,s=5,c=colors,alpha=.3)
pl.show()
def sample(self,n_samples,py,plot=False):
"""samples Y according to py and corresponding features x1,x2 according to the gaussian for the corresponding class"""
Y = bernoulli.rvs(py,size=n_samples)
X = np.zeros((n_samples,2))
for i in range(n_samples):
if Y[i] == 1:
X[i,:] = self.rv1.rvs()
else:
X[i,:] = self.rv0.rvs()
if plot:
self.plot(data=(X,Y))
return X,Y
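# Illustrative usage of GaussianMixture (all values below are made up):
#   gm = GaussianMixture(mean0=[-1.0, -1.0], cov0=[[1.0, 0.0], [0.0, 1.0]],
#                        mean1=[1.0, 1.0], cov1=[[1.0, 0.5], [0.5, 1.0]])
#   X, Y = gm.sample(500, py=0.5, plot=True)   # 500 samples with P(y=1) = 0.5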
def load_data_(filename):
with open(filename) as f:
g = (",".join([i[1],i[2],i[4],i[5],i[6],i[7],i[9],i[11]]).encode(encoding='UTF-8')
for i in csv.reader(f,delimiter=",",quotechar='"'))
data = np.genfromtxt(g, delimiter=",",names=True,
dtype=(int,int,np.dtype('a6'),float,int,int,float,np.dtype('a1')))
embark_dict = {b'S':0, b'C':1, b'Q':2, b'':3}
survived = data['Survived']
passenger_class = data['Pclass']
is_female = (data['Sex'] == b'female').astype(int)
age = data['Age']
sibsp = data['SibSp']
parch = data['Parch']
fare = data['Fare']
embarked = np.array([embark_dict[k] for k in data['Embarked']])
# skip age for the moment because of the missing data
X = np.vstack((passenger_class, is_female, sibsp, parch, fare, embarked)).T
Y = survived
return X, Y
def load_data():
return load_data_("titanic_train.csv")
def load_test_data():
return load_data_("titanic_test.csv")
def whitening_matrix(X):
    """Return the whitening matrix of the (already de-meaned) data matrix X.
    Multiplying X by this matrix gives data with identity sample covariance."""
    assert (X.ndim == 2)
    sigma = np.dot(X.T, X)  # scatter matrix; (n-1) times the covariance of de-meaned X
    e, m = scipy.linalg.eigh(sigma)
    return np.dot(m, np.diag(1.0/np.sqrt(e)))*np.sqrt((X.shape[0]-1))
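# Sanity-check sketch (assumes X_demeaned has had its column means removed):
#   W = whitening_matrix(X_demeaned)
#   np.allclose(np.cov(np.dot(X_demeaned, W), rowvar=False),
#               np.eye(X_demeaned.shape[1]))   # ~True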
def plot_svm(X, Y, svm_instance, xdim1=0, xdim2=1, minbound=(-3,-3),
maxbound=(3,3), resolution=(100,100)):
""" Plot any two dimensions from an SVM"""
# build the meshgrid for the two dims we care about
d = svm_instance.shape_fit_[1]
n = resolution[0] * resolution[1]
xx, yy = np.meshgrid(np.linspace(minbound[0], maxbound[0], resolution[0]),
np.linspace(minbound[1], maxbound[1], resolution[1]))
query2d = np.c_[xx.ravel(), yy.ravel()]
query = np.zeros((n,d))
query[:,xdim1] = query2d[:, 0]
query[:,xdim2] = query2d[:, 1]
Z = svm_instance.decision_function(query)
Z = Z.reshape(xx.shape)
fig = pl.figure(figsize=(10,10))
ax = fig.add_subplot(111)
ax.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=pl.cm.PuOr_r)
    contours = ax.contour(xx, yy, Z, levels=[0], linewidths=2,
                          linestyles='--')
ax.scatter(X[:, xdim1], X[:, xdim2], s=30, c=Y, cmap=pl.cm.Paired)
# ax.set_xticks(())
# pl.yticks(())
ax.set_xlim((minbound[0], maxbound[0]))
ax.set_ylim((minbound[1], maxbound[1]))
pl.show()
def illustrate_preprocessing():
x = np.random.multivariate_normal(np.array([5.0,5.0]),
np.array([[5.0,3.0],[3.0,4.0]]),size=1000)
x_demean = x - np.mean(x, axis=0)
x_unitsd = x_demean/(np.std(x_demean,axis=0))
x_whiten = np.dot(x_demean, whitening_matrix(x_demean))
fig = pl.figure(figsize=(10,10))
def mk_subplot(n, data, label):
ax = fig.add_subplot(2,2,n)
ax.scatter(data[:,0], data[:,1])
ax.set_xlim((-10,10))
ax.set_ylim((-10,10))
ax.set_xlabel(label)
mk_subplot(1, x, "Original")
mk_subplot(2, x_demean, "De-meaned")
mk_subplot(3, x_unitsd, "Unit SD")
mk_subplot(4, x_whiten, "Whitened")
pl.show()
def margins_and_hyperplane():
#gen some data
np.random.seed(0)
n = 20
X = (np.vstack((np.ones((n,2))*np.array([0.5,1]),
np.ones((n,2))*np.array([-0.5,-1]))) + np.random.randn(2*n,2)*0.3)
Y = np.hstack((np.ones(n), np.zeros(n)))
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# Note the following code comes from a scikit learn example...
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xs = np.linspace(-2, 2)
ys = a * xs - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
ys_down = a * xs + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
ys_up = a * xs + (b[1] - a * b[0])
#draw a bad margin
def line_point_grad(x, grad, p1):
y = grad*(x - p1[0]) + p1[1]
return y
minp = X[np.argmin(X[:n,0])]
maxp = X[n + np.argmax(X[n:,0])]
yb = line_point_grad(xs, a*20, np.array([0.5*(minp[0]+maxp[0]),0.0]))
yb_down = line_point_grad(xs, a*20, minp)
yb_up = line_point_grad(xs, a*20, maxp)
# plot the line, the points, and the nearest vectors to the plane
fig = pl.figure(figsize=(10,10))
ax = fig.add_subplot(111)
ax.plot(xs, ys, 'g-')
ax.plot(xs, yb, 'r-')
ax.plot(xs, yb_down, 'r--')
ax.plot(xs, yb_up, 'r--')
ax.plot(xs, ys_down, 'g--')
ax.plot(xs, ys_up, 'g--')
ax.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
ax.scatter([minp[0],maxp[0]], [minp[1],maxp[1]],
s=80, facecolors='none')
ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
ax.set_xlim((-2,2))
ax.set_ylim((-2,2))
pl.show()
def hard_data():
#gen some data
np.random.seed(0)
epsilon = 0.05
n = 5000
X1 = np.random.randn(n,2)
X2 = np.random.randn(n,2)
valid1 = X1[:,0]**2 + X1[:,1]**2 < (0.5 - epsilon)
valid2 = np.logical_and((X2[:,0]**2 + X2[:,1]**2 > (0.5 + epsilon)),
(X2[:,0]**2 + X2[:,1]**2 < 1.0))
X1 = X1[valid1]
X2 = X2[valid2]
Y1 = np.ones(X1.shape[0])
Y2 = np.zeros(X2.shape[0])
X = np.vstack((X1,X2))
Y = np.hstack((Y1,Y2))
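    # (x1**2, x2**2, sqrt(2)*x1*x2) is the explicit feature map of the
    # homogeneous degree-2 polynomial kernel; in that space the concentric
    # classes become linearly separable (plotted in nonlinear_example below).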
Z = np.sqrt(2)*X[:,0]*X[:,1]
return X, Y, Z
def nonlinear_example():
X, Y, Z = hard_data()
fig = pl.figure(figsize=(10,20))
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
ax = fig.add_subplot(212, projection='3d')
ax.scatter(X[:,0]**2, X[:,1]**2, Z, c=Y, cmap=pl.cm.Paired)
pl.show()
def nonlinear_svm():
X, Y, Z = hard_data()
clf = svm.SVC(kernel='rbf')
clf.fit(X, Y)
plot_svm(X, Y, clf, 0,1, (-1.5,-1.5), (1.5,1.5))
#if __name__ == "__main__":
# nonlinear_example()
| gpl-2.0 |
Nyker510/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |