repo_name (string, 6-96 chars) | path (string, 4-191 chars) | copies (322 classes) | size (string, 4-6 chars) | content (string, 762-753k chars) | license (15 classes) |
---|---|---|---|---|---|
ctoher/pymatgen | setup.py | 2 | 4605 | import glob
import os
import subprocess
from io import open
import sys
from setuptools import setup, find_packages, Extension
try:
from numpy.distutils.misc_util import get_numpy_include_dirs
except ImportError:
print("numpy.distutils.misc_util cannot be imported. Attempting to "
"install...")
subprocess.call(["easy_install", "numpy"])
from numpy.distutils.misc_util import get_numpy_include_dirs
SETUP_PTH = os.path.dirname(os.path.abspath(__file__))
def get_spglib_ext():
"""
Set up spglib extension.
"""
spglibs = glob.glob(os.path.join(SETUP_PTH, "dependencies", "spglib*"))
if len(spglibs) != 1:
raise ValueError("Incorrect number of spglib found in dependencies. "
"Expected 1, got %d" % len(spglibs))
spglibdir = spglibs[0]
# Set up the rest of the spglib extension: source dir, include dirs and C sources
spgsrcdir = os.path.join(spglibdir, "src")
include_dirs = [spgsrcdir]
sources = glob.glob(os.path.join(spgsrcdir, "*.c"))
c_opt = [] if sys.version_info.major < 3 else [
"-Wno-error=declaration-after-statement"]
return Extension(
"pymatgen._spglib",
include_dirs=include_dirs + get_numpy_include_dirs(),
sources=[os.path.join(spglibdir, "_spglib.c")] + sources,
extra_compile_args=c_opt)
with open("README.rst") as f:
long_desc = f.read()
ind = long_desc.find("\n")
long_desc = long_desc[ind + 1:]
setup(
name="pymatgen",
packages=find_packages(),
version="3.0.13",
install_requires=["numpy>=1.8", "pyhull>=1.5.3", "six", "prettytable",
"atomicfile", "requests", "pybtex", "pyyaml",
"monty>=0.6.4", "scipy>=0.10"],
extras_require={"plotting": ["matplotlib>=1.1", "prettyplotlib"],
"ase_adaptor": ["ase>=3.3"],
"vis": ["vtk>=6.0.0"],
"abinitio": ["pydispatcher>=2.0.3", "apscheduler==2.1.0"]},
package_data={"pymatgen.core": ["*.json"],
"pymatgen.analysis": ["*.yaml", "*.csv"],
"pymatgen.io": ["*.yaml"],
"pymatgen.symmetry": ["*.yaml"],
"pymatgen.io.gwwrapper":["*.json"],
"pymatgen.entries": ["*.yaml"],
"pymatgen.structure_prediction": ["data/*.json"],
"pymatgen.vis": ["ElementColorSchemes.yaml"],
"pymatgen.command_line": ["OxideTersoffPotentials"],
"pymatgen.analysis.defects": ["*.json"],
"pymatgen.analysis.diffraction": ["*.json"],
"pymatgen.util": ["structures/*.json"]},
author="Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier,"
"William Davidson Richards, Stephen Dacek, Dan Gunter, Shreyas Cholia, "
"Matteo Giantomassi, Vincent L Chevrier, Rickard Armiento",
author_email="[email protected], [email protected], [email protected], "
"[email protected], [email protected], [email protected], "
"[email protected], [email protected], [email protected], "
"[email protected], [email protected]",
maintainer="Shyue Ping Ong",
url="https://github.com/materialsproject/pymatgen/",
license="MIT",
description="Python Materials Genomics is a robust materials "
"analysis code that defines core object representations for "
"structures and molecules with support for many electronic "
"structure codes. It is currently the core analysis code "
"powering the Materials Project "
"(https://www.materialsproject.org).",
long_description=long_desc,
keywords=["VASP", "gaussian", "ABINIT", "nwchem", "materials", "project",
"electronic", "structure", "analysis", "phase", "diagrams"],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
ext_modules=[get_spglib_ext()],
scripts=glob.glob(os.path.join(SETUP_PTH, "scripts", "*"))
)
| mit |
cwu2011/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
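# Benchmark plain KMeans against MiniBatchKMeans over a grid of
# (n_samples, n_features) sizes, recording fit time and inertia for each run.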
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
cogmission/nupic.research | projects/sequence_classification/generate_synthetic_data.py | 2 | 5520 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate synthetic sequences using a pool of sequence motifs
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sklearn import decomposition
import random
plt.ion()
plt.close('all')
# Generate a set of sequence motifs
def generateSequenceMotifs(numMotif, motifLength, seed=None):
if seed is not None:
np.random.seed(seed)
sequenceMotifs = np.random.randn(motifLength, motifLength)
pca = decomposition.PCA(n_components=numMotif)
pca.fit(sequenceMotifs)
sequenceMotifs = pca.components_
for i in range(numMotif):
sequenceMotifs[i, :] = sequenceMotifs[i, :]-min(sequenceMotifs[i, :])
sequenceMotifs[i, :] = sequenceMotifs[i, :]/max(sequenceMotifs[i, :])
return sequenceMotifs
def generateSequence(sequenceLength, useMotif, currentClass, sequenceMotifs):
motifLength = sequenceMotifs.shape[1]
sequence = np.zeros((sequenceLength + 20,))
motifState = np.zeros((sequenceLength + 20,))
randomLengthList = np.linspace(1, 10, 10).astype('int')
# randomLengthList = [1]
t = 0
while t < sequenceLength:
randomLength = np.random.choice(randomLengthList)
sequence[t:t + randomLength] = np.random.rand(randomLength)
motifState[t:t + randomLength] = -1
t += randomLength
motifIdx = np.random.choice(useMotif[currentClass])
print "motifIdx: ", motifIdx
sequence[t:t + motifLength] = sequenceMotifs[motifIdx]
motifState[t:t + motifLength] = motifIdx
t += motifLength
sequence = sequence[:sequenceLength]
motifState = motifState[:sequenceLength]
return sequence, motifState
def generateSequences(numSeq, numClass, sequenceLength, useMotif, sequenceMotifs):
trainData = np.zeros((numSeq, sequenceLength+1))
numSeqPerClass = numSeq/numClass
classList = []
for classIdx in range(numClass):
classList += [classIdx] * numSeqPerClass
# classList = np.random.permutation(classList)
for seq in range(numSeq):
currentClass = classList[seq]
# print "useMotif, {}".format(useMotif)
sequence, motifState = generateSequence(sequenceLength, useMotif,
currentClass, sequenceMotifs)
trainData[seq, 0] = currentClass
trainData[seq, 1:] = sequence
return trainData
numMotif = 5
motifLength = 5
sequenceMotifs = generateSequenceMotifs(numMotif, 5, seed=42)
numTrain = 100
numTest = 100
numClass = 2
motifPerClass = 2
np.random.seed(2)
useMotif = {}
motifList = set(range(numMotif))
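# Assign `motifPerClass` distinct motifs to each class, sampling without
# replacement from the shared motif pool.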
for classIdx in range(numClass):
useMotifForClass = []
for _ in range(motifPerClass):
useMotifForClass.append(np.random.choice(list(motifList)))
motifList.remove(useMotifForClass[-1])
useMotif[classIdx] = useMotifForClass
sequenceLength = 100
currentClass = 0
sequence, motifState = generateSequence(sequenceLength, useMotif, currentClass, sequenceMotifs)
MotifColor = {}
colorList = ['r','g','b','c','m','y']
i = 0
for c in useMotif.keys():
for v in useMotif[c]:
MotifColor[v] = colorList[i]
i += 1
fig, ax = plt.subplots(nrows=4, ncols=1)
for plti in xrange(4):
currentClass = 0 if plti < 2 else 1
sequence, motifState = generateSequence(sequenceLength, useMotif, currentClass, sequenceMotifs)
ax[plti].plot(sequence, 'k-')
startPatch = False
for t in range(len(motifState)):
if motifState[t] >= 0 and startPatch is False:
startPatchAt = t
startPatch = True
currentMotif = motifState[t]
if startPatch and (motifState[t] < 0):
endPatchAt = t-1
ax[plti].add_patch(
patches.Rectangle(
(startPatchAt, 0),
endPatchAt-startPatchAt, 1, alpha=0.5,
color=MotifColor[currentMotif]
)
)
startPatch = False
ax[plti].set_xlim([0, 100])
ax[plti].set_ylabel('class {}'.format(currentClass))
# ax[1].plot(motifState)
trainData = generateSequences(numTrain, numClass, sequenceLength, useMotif, sequenceMotifs)
testData = generateSequences(numTest, numClass, sequenceLength, useMotif, sequenceMotifs)
np.savetxt('SyntheticData/Test1/Test1_TRAIN', trainData, delimiter=',')
np.savetxt('SyntheticData/Test1/Test1_TEST', testData, delimiter=',')
# writeSequenceToFile('SyntheticData/Test1/Test1_TRAIN', 100, numClass, sequenceLength, useMotif, sequenceMotifs)
# writeSequenceToFile('SyntheticData/Test1/Test1_TEST', 100, numClass, sequenceLength, useMotif, sequenceMotifs)
#
plt.figure()
trainLabel = trainData[:, 0].astype('int')
trainData = trainData[:, 1:]
plt.imshow(trainData[np.where(trainLabel==0)[0],:])
# plt.plot(motifState) | agpl-3.0 |
jmetzen/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 26 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when `radius` is set to the mean
# distance from the query point to the other points in the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with those of `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
platinhom/ManualHom | Coding/Python/numpy-html-1.10.1/reference/generated/numpy-random-RandomState-gumbel-1.py | 4 | 1082 | # Draw samples from the distribution:
mu, beta = 0, 0.1 # location and scale
s = np.random.gumbel(mu, beta, 1000)
# Display the histogram of the samples, along with
# the probability density function:
import matplotlib.pyplot as plt
count, bins, ignored = plt.hist(s, 30, normed=True)
plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
* np.exp( -np.exp( -(bins - mu) /beta) ),
linewidth=2, color='r')
plt.show()
# Show how an extreme value distribution can arise from a Gaussian process
# and compare to a Gaussian:
means = []
maxima = []
for i in range(0,1000) :
a = np.random.normal(mu, beta, 1000)
means.append(a.mean())
maxima.append(a.max())
count, bins, ignored = plt.hist(maxima, 30, normed=True)
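# Method-of-moments fit of a Gumbel distribution to the sample maxima:
# for Gumbel(mu, beta), std = beta*pi/sqrt(6) and mean = mu + gamma*beta,
# where gamma ~ 0.57721 is the Euler-Mascheroni constant.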
beta = np.std(maxima)*np.pi/np.sqrt(6)
mu = np.mean(maxima) - 0.57721*beta
plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
* np.exp(-np.exp(-(bins - mu)/beta)),
linewidth=2, color='r')
plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
* np.exp(-(bins - mu)**2 / (2 * beta**2)),
linewidth=2, color='g')
plt.show()
| gpl-2.0 |
meduz/scikit-learn | sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
# Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
# Test that huber returns a better r2 score than ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
trhongbinwang/data_science_journey | deep_learning/keras/examples/neural_doodle.py | 2 | 14079 | '''Neural doodle with Keras
Script Usage:
# Arguments:
```
--nlabels: # of regions (colors) in mask images
--style-image: image to learn style from
--style-mask: semantic labels for style image
--target-mask: semantic labels for target image (your doodle)
--content-image: optional image to learn content from
--target-image-prefix: path prefix for generated target images
```
# Example 1: doodle using a style image, style mask
and target mask.
```
python neural_doodle.py --nlabels 4 --style-image Monet/style.png \
--style-mask Monet/style_mask.png --target-mask Monet/target_mask.png \
--target-image-prefix generated/monet
```
# Example 2: doodle using a style image, style mask,
target mask and an optional content image.
```
python neural_doodle.py --nlabels 4 --style-image Renoir/style.png \
--style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png \
--content-image Renoir/creek.jpg \
--target-image-prefix generated/renoir
```
References:
[Dmitry Ulyanov's blog on fast-neural-doodle](http://dmitryulyanov.github.io/feed-forward-neural-doodle/)
[Torch code for fast-neural-doodle](https://github.com/DmitryUlyanov/fast-neural-doodle)
[Torch code for online-neural-doodle](https://github.com/DmitryUlyanov/online-neural-doodle)
[Paper Texture Networks: Feed-forward Synthesis of Textures and Stylized Images](http://arxiv.org/abs/1603.03417)
[Discussion on parameter tuning](https://github.com/fchollet/keras/issues/3705)
Resources:
Example images can be downloaded from
https://github.com/DmitryUlyanov/fast-neural-doodle/tree/master/data
'''
from __future__ import print_function
import time
import argparse
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imread, imsave
from keras import backend as K
from keras.layers import Input, AveragePooling2D
from keras.models import Model
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg19
# Command line arguments
parser = argparse.ArgumentParser(description='Keras neural doodle example')
parser.add_argument('--nlabels', type=int,
help='number of semantic labels'
' (regions in different colors)'
' in style_mask/target_mask')
parser.add_argument('--style-image', type=str,
help='path to image to learn style from')
parser.add_argument('--style-mask', type=str,
help='path to semantic mask of style image')
parser.add_argument('--target-mask', type=str,
help='path to semantic mask of target image')
parser.add_argument('--content-image', type=str, default=None,
help='path to optional content image')
parser.add_argument('--target-image-prefix', type=str,
help='path prefix for generated results')
args = parser.parse_args()
style_img_path = args.style_image
style_mask_path = args.style_mask
target_mask_path = args.target_mask
content_img_path = args.content_image
target_img_prefix = args.target_image_prefix
use_content_img = content_img_path is not None
num_labels = args.nlabels
num_colors = 3 # RGB
# determine image sizes based on target_mask
ref_img = imread(target_mask_path)
img_nrows, img_ncols = ref_img.shape[:2]
total_variation_weight = 50.
style_weight = 1.
content_weight = 0.1 if use_content_img else 0
content_feature_layers = ['block5_conv2']
# To get better generation qualities, use more conv layers for style features
style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
'block4_conv1', 'block5_conv1']
# helper functions for reading/processing images
def preprocess_image(image_path):
img = load_img(image_path, target_size=(img_nrows, img_ncols))
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = vgg19.preprocess_input(img)
return img
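# Note: vgg19.preprocess_input converts RGB to BGR and subtracts the ImageNet
# channel means; deprocess_image below undoes exactly those two steps.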
def deprocess_image(x):
if K.image_data_format() == 'channels_first':
x = x.reshape((3, img_nrows, img_ncols))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_nrows, img_ncols, 3))
# Remove zero-center by mean pixel
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR'->'RGB'
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
def kmeans(xs, k):
assert xs.ndim == 2
try:
from sklearn.cluster import k_means
_, labels, _ = k_means(xs.astype('float64'), k)
except ImportError:
from scipy.cluster.vq import kmeans2
_, labels = kmeans2(xs, k, missing='raise')
return labels
def load_mask_labels():
'''Load both target and style masks.
A mask image (nr x nc) with m labels/colors will be loaded
as a 4D boolean tensor: (1, m, nr, nc) for 'channels_first' or (1, nr, nc, m) for 'channels_last'
'''
target_mask_img = load_img(target_mask_path,
target_size=(img_nrows, img_ncols))
target_mask_img = img_to_array(target_mask_img)
style_mask_img = load_img(style_mask_path,
target_size=(img_nrows, img_ncols))
style_mask_img = img_to_array(style_mask_img)
if K.image_data_format() == 'channels_first':
mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
target_mask_img.reshape((3, -1)).T])
else:
mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
target_mask_img.reshape((-1, 3))])
labels = kmeans(mask_vecs, num_labels)
style_mask_label = labels[:img_nrows *
img_ncols].reshape((img_nrows, img_ncols))
target_mask_label = labels[img_nrows *
img_ncols:].reshape((img_nrows, img_ncols))
stack_axis = 0 if K.image_data_format() == 'channels_first' else -1
style_mask = np.stack([style_mask_label == r for r in xrange(num_labels)],
axis=stack_axis)
target_mask = np.stack([target_mask_label == r for r in xrange(num_labels)],
axis=stack_axis)
return (np.expand_dims(style_mask, axis=0),
np.expand_dims(target_mask, axis=0))
# Create tensor variables for images
if K.image_data_format() == 'channels_first':
shape = (1, num_colors, img_nrows, img_ncols)
else:
shape = (1, img_nrows, img_ncols, num_colors)
style_image = K.variable(preprocess_image(style_img_path))
target_image = K.placeholder(shape=shape)
if use_content_img:
content_image = K.variable(preprocess_image(content_img_path))
else:
content_image = K.zeros(shape=shape)
images = K.concatenate([style_image, target_image, content_image], axis=0)
# Create tensor variables for masks
raw_style_mask, raw_target_mask = load_mask_labels()
style_mask = K.variable(raw_style_mask.astype('float32'))
target_mask = K.variable(raw_target_mask.astype('float32'))
masks = K.concatenate([style_mask, target_mask], axis=0)
# index constants for the images and masks tensors
STYLE, TARGET, CONTENT = 0, 1, 2
# Build image model, mask model and use layer outputs as features
# image model as VGG19
image_model = vgg19.VGG19(include_top=False, input_tensor=images)
# mask model as a series of pooling
mask_input = Input(tensor=masks, shape=(None, None, None), name='mask_input')
x = mask_input
for layer in image_model.layers[1:]:
name = 'mask_%s' % layer.name
if 'conv' in layer.name:
x = AveragePooling2D((3, 3), strides=(
1, 1), name=name, border_mode='same')(x)
elif 'pool' in layer.name:
x = AveragePooling2D((2, 2), name=name)(x)
mask_model = Model(mask_input, x)
# Collect features from image_model and mask_model
image_features = {}
mask_features = {}
for img_layer, mask_layer in zip(image_model.layers, mask_model.layers):
if 'conv' in img_layer.name:
assert 'mask_' + img_layer.name == mask_layer.name
layer_name = img_layer.name
img_feat, mask_feat = img_layer.output, mask_layer.output
image_features[layer_name] = img_feat
mask_features[layer_name] = mask_feat
# Define loss functions
def gram_matrix(x):
assert K.ndim(x) == 3
features = K.batch_flatten(x)
gram = K.dot(features, K.transpose(features))
return gram
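# Matching Gram matrices of the channel-flattened feature maps matches the
# channel co-activation statistics, which is what carries the "style".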
def region_style_loss(style_image, target_image, style_mask, target_mask):
'''Calculate style loss between style_image and target_image,
for one common region specified by their (boolean) masks
'''
assert 3 == K.ndim(style_image) == K.ndim(target_image)
assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
if K.image_data_format() == 'channels_first':
masked_style = style_image * style_mask
masked_target = target_image * target_mask
num_channels = K.shape(style_image)[0]
else:
masked_style = K.permute_dimensions(
style_image, (2, 0, 1)) * style_mask
masked_target = K.permute_dimensions(
target_image, (2, 0, 1)) * target_mask
num_channels = K.shape(style_image)[-1]
s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
return K.mean(K.square(s - c))
def style_loss(style_image, target_image, style_masks, target_masks):
'''Calculate style loss between style_image and target_image,
in all regions.
'''
assert 3 == K.ndim(style_image) == K.ndim(target_image)
assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
loss = K.variable(0)
for i in xrange(num_labels):
if K.image_data_format() == 'channels_first':
style_mask = style_masks[i, :, :]
target_mask = target_masks[i, :, :]
else:
style_mask = style_masks[:, :, i]
target_mask = target_masks[:, :, i]
loss += region_style_loss(style_image,
target_image, style_mask, target_mask)
return loss
def content_loss(content_image, target_image):
return K.sum(K.square(target_image - content_image))
def total_variation_loss(x):
assert 4 == K.ndim(x)
if K.image_data_format() == 'channels_first':
a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
x[:, :, 1:, :img_ncols - 1])
b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
x[:, :, :img_nrows - 1, 1:])
else:
a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
x[:, 1:, :img_ncols - 1, :])
b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
x[:, :img_nrows - 1, 1:, :])
return K.sum(K.pow(a + b, 1.25))
# Overall loss is the weighted sum of content_loss, style_loss and tv_loss
# Each individual loss uses features from image/mask models.
loss = K.variable(0)
for layer in content_feature_layers:
content_feat = image_features[layer][CONTENT, :, :, :]
target_feat = image_features[layer][TARGET, :, :, :]
loss += content_weight * content_loss(content_feat, target_feat)
for layer in style_feature_layers:
style_feat = image_features[layer][STYLE, :, :, :]
target_feat = image_features[layer][TARGET, :, :, :]
style_masks = mask_features[layer][STYLE, :, :, :]
target_masks = mask_features[layer][TARGET, :, :, :]
sl = style_loss(style_feat, target_feat, style_masks, target_masks)
loss += (style_weight / len(style_feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(target_image)
loss_grads = K.gradients(loss, target_image)
# Evaluator class for computing efficiency
outputs = [loss]
if isinstance(loss_grads, (list, tuple)):
outputs += loss_grads
else:
outputs.append(loss_grads)
f_outputs = K.function([target_image], outputs)
def eval_loss_and_grads(x):
if K.image_data_format() == 'channels_first':
x = x.reshape((1, 3, img_nrows, img_ncols))
else:
x = x.reshape((1, img_nrows, img_ncols, 3))
outs = f_outputs([x])
loss_value = outs[0]
if len(outs[1:]) == 1:
grad_values = outs[1].flatten().astype('float64')
else:
grad_values = np.array(outs[1:]).flatten().astype('float64')
return loss_value, grad_values
class Evaluator(object):
def __init__(self):
self.loss_value = None
self.grads_values = None
def loss(self, x):
assert self.loss_value is None
loss_value, grad_values = eval_loss_and_grads(x)
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
evaluator = Evaluator()
# Generate images by iterative optimization
if K.image_data_format() == 'channels_first':
x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128.
else:
x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128.
for i in range(50):
print('Start of iteration', i)
start_time = time.time()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
fprime=evaluator.grads, maxfun=20)
print('Current loss value:', min_val)
# save current generated image
img = deprocess_image(x.copy())
fname = target_img_prefix + '_at_iteration_%d.png' % i
imsave(fname, img)
end_time = time.time()
print('Image saved as', fname)
print('Iteration %d completed in %ds' % (i, end_time - start_time))
| apache-2.0 |
twhyntie/image-heatmap | make_image_heatmap.py | 1 | 3834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...for the plotting.
import matplotlib.pyplot as plt
#...for the image manipulation.
import matplotlib.image as mpimg
#...for the MATH.
import numpy as np
# For scaling images.
import scipy.ndimage.interpolation as inter
#...for the colours.
from matplotlib import colorbar, colors
# For playing with the tick marks on the colour map axis.
from matplotlib import ticker
# Load the LaTeX text plot libraries.
from matplotlib import rc
# Uncomment to use LaTeX for the plot text.
rc('font',**{'family':'serif','serif':['Computer Modern']})
rc('text', usetex=True)
# Load in the image.
## The scan image as a NumPy array.
scan_img = mpimg.imread("scan.png")
print(" *")
print(" * Image dimensions: %s" % (str(scan_img.shape)))
## The figure upon which to display the scan image.
plot = plt.figure(101, figsize=(5.0, 5.0), dpi=150, facecolor='w', edgecolor='w')
# Adjust the position of the axes.
#plot.subplots_adjust(bottom=0.17, left=0.15)
plot.subplots_adjust(bottom=0.05, left=0.15, right=0.99, top=0.95)
## The plot axes.
plotax = plot.add_subplot(111)
# Set the x axis label.
plt.xlabel("$x$")
# Set the y axis label.
plt.ylabel("$y$")
# Add the original scan image to the plot.
plt.imshow(scan_img)
## The blob centre x values [pixels].
blob_xs = []
## The blob centre x values [pixels].
blob_ys = []
## The blob radii [pixels].
blob_rs = []
# Open the blob data file and retrieve the x, y, and r values.
with open("blobs.csv", "r") as f:
for l in f.readlines():
blob_xs.append(float(l.split(",")[0]))
blob_ys.append(float(l.split(",")[1]))
blob_rs.append(float(l.split(",")[2]))
## The image scale factor.
scale = 6.0
## The width of the image scaled up by the scale factor [pixels].
w = scan_img.shape[0]
## The original width of the image [pixels].
w_o = w / scale
## The height of the image scaled up by the scale factor [pixels].
h = scan_img.shape[1]
## The original height of the image [pixels].
h_o = h / scale
print(" * Image dimensions (w,h) = (%d,%d) -> (w_o,h_o) = (%d,%d)" % (w,h,w_o,h_o))
## The number of bins in each dimension of the heatmap.
#
# We are using the original image dimensions so that our heat map
# maps to the pixels in the original image. This is mainly for
# aesthetic reasons - there would be nothing to stop us using more
# (or fewer) bins.
bins = [w_o, h_o]
## The dimensions of the heat map, taken from the scaled-up image.
map_range = [[0, w], [0, h]]
# Create the heat map using NumPy's 2D histogram functionality.
centre_heatmap, x_edges, y_edges = np.histogram2d(blob_ys, blob_xs, bins=bins, range=map_range)
## The scaled heat map image.
#
# We need to scale the heat map array because although the bin widths
# are > 1, the resultant histogram (when made into an image) creates
# an image with one pixel per bin.
zoom_img = inter.zoom(centre_heatmap, (scale, scale), order=0, prefilter=False)
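# Shape note: `centre_heatmap` has one bin per original pixel, i.e. shape
# (w_o, h_o); zooming by `scale` in both directions gives (w, h), so the
# heat map overlays the displayed scan pixel-for-pixel.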
## The colo(u)r map for the heat map.
cmap = plt.cm.gnuplot
## The maximum number of blob centres in the heat map.
bc_max = np.amax(centre_heatmap)
#
print(" * Maximum value in the heat map is %d." % (bc_max))
## The maximum value to use in the colo(u)r map axis.
color_map_max = bc_max
# Add the (scaled) heat map (2D histogram) to the plot.
zoomed_heat_map = plt.imshow(zoom_img, alpha=0.8, cmap=cmap,norm=colors.Normalize(vmin=0,vmax=color_map_max))
## The heat map colo(u)r bar.
cb = plt.colorbar(alpha=1.0, mappable=zoomed_heat_map)
## An object to neaten up the colour map axis tick marks.
tick_locator = ticker.MaxNLocator(nbins=7)
#
cb.locator = tick_locator
#
cb.update_ticks()
# Add a grid.
plt.grid(1)
# Crop the plot limits to the limits of the scan itself.
plotax.set_xlim([0, h])
plotax.set_ylim([w, 0])
# Save the figure.
plot.savefig("heatmap.png")
print(" *")
| mit |
kristianeschenburg/parcellearning | docs/source/conf.py | 1 | 6437 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# parcellearning documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'sphinx_copybutton',
]
# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'parcellearning'
copyright = '2020, Kristian Eschenburg'
author = 'Kristian Eschenburg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import parcellearning
# The short X.Y version.
version = parcellearning.__version__
# The full version, including alpha/beta/rc tags.
release = parcellearning.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'parcellearning'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'parcellearning.tex', 'parcellearning Documentation',
'Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'parcellearning', 'parcellearning Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'parcellearning', 'parcellearning Documentation',
author, 'parcellearning', 'Python project for learning cortical maps using graph neural networks',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'matplotlib': ('https://matplotlib.org', None),
}
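# With the mappings above, a cross-reference in the documentation such as
#
#     :class:`numpy.ndarray` or :func:`scipy.spatial.Delaunay`
#
# resolves to a link into the corresponding external documentation when the
# docs are built (an illustrative note on how intersphinx is typically used).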
| mit |
WarrenWeckesser/numpngw | numpngw.py | 1 | 60394 | """
The numpngw module defines two functions and a class:
* write_png(...) writes a numpy array to a PNG file.
* write_apng(...) writes a sequence of arrays to an animated PNG file.
* AnimatedPNGWriter is a class that can be used with Matplotlib animations.
-----
Copyright (c) 2015, Warren Weckesser
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import (division as _division,
print_function as _print_function)
import sys as _sys
import contextlib as _contextlib
from io import BytesIO as _BytesIO
import time as _time
import struct as _struct
import zlib as _zlib
from fractions import Fraction as _Fraction
import operator
import numpy as _np
__all__ = ['write_png', 'write_apng', 'AnimatedPNGWriter']
__version__ = "0.0.9.dev1"
_PY3 = _sys.version_info > (3,)
if _PY3:
def _bord(c):
return c
else:
_bord = ord
def _software_text():
software = ("numpngw (version %s), "
"https://github.com/WarrenWeckesser/numpngw" % __version__)
return software
def _filter0(row, prev_row):
return row
def _filter0inv(frow, prev_row):
return frow
def _filter1(row, prev_row):
d = _np.zeros_like(row)
d[1:] = _np.diff(row, axis=0)
d[0] = row[0]
return d
def _filter1inv(frow, prev_row):
return frow.cumsum(axis=0, dtype=_np.uint64).astype(_np.uint8)
def _filter2(row, prev_row):
d = row - prev_row
return d
def _filter2inv(frow, prev_row):
return frow + prev_row
def _filter3(row, prev_row):
a = _np.zeros_like(row, dtype=_np.int64)
a[1:] = row[:-1]
c = ((a + prev_row) // 2).astype(row.dtype)
d = row - c
return d
def _filter3inv(frow, prev_row):
# Slow python loop, but currently this is only used for testing.
row = _np.empty_like(frow)
for k in range(len(frow)):
if k == 0:
row[k] = frow[k] + (prev_row[k] // 2)
else:
row[k] = frow[k] + (row[k-1].astype(int) +
prev_row[k].astype(int)) // 2
return row
def _filter4(row, prev_row):
"""Paeth filter."""
# Create a, b and c.
a = _np.zeros_like(row, dtype=_np.int64)
a[1:] = row[:-1]
b = prev_row.astype(_np.int64)
c = _np.zeros_like(b)
c[1:] = b[:-1]
p = a + b - c
pa = _np.abs(p - a)
pb = _np.abs(p - b)
pc = _np.abs(p - c)
y = _np.where((pa <= pb) & (pa <= pc), a, _np.where(pb <= pc, b, c))
pr = y.astype(_np.uint8)
d = row - pr
return d
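# A small worked example of the Paeth predictor above (illustrative values):
# with left neighbour a = 100, up neighbour b = 90 and upper-left c = 95,
# p = a + b - c = 95, so pa = 5, pb = 5, pc = 0 and the chosen predictor is
# c = 95; the byte stored in the filtered row is then (row value - 95) mod 256.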
def _filter4inv(frow, prev_row):
# Slow python loop, but currently this is only used for testing.
row = _np.empty_like(frow)
for k in range(len(frow)):
if k == 0:
ra = _np.zeros_like(frow[k])
rc = _np.zeros_like(frow[k])
else:
ra = row[k-1].astype(int)
rc = prev_row[k-1].astype(int)
rb = prev_row[k].astype(int)
p = ra + rb - rc
pa = _np.abs(p - ra)
pb = _np.abs(p - rb)
pc = _np.abs(p - rc)
y = _np.where((pa <= pb) & (pa <= pc), ra, _np.where(pb <= pc, rb, rc))
row[k] = frow[k] + y
return row
def _interlace_passes(img):
"""
Return the subimages of img that make up the seven Adam7 interlace passes.
"""
pass1 = img[::8, ::8]
pass2 = img[::8, 4::8]
pass3 = img[4::8, ::4]
pass4 = img[::4, 2::4]
pass5 = img[2::4, ::2]
pass6 = img[::2, 1::2]
pass7 = img[1::2, :]
return (pass1, pass2, pass3, pass4, pass5, pass6, pass7)
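# For example (illustrative only): for an 8x8 image the seven passes above
# have shapes (1, 1), (1, 1), (1, 2), (2, 2), (2, 4), (4, 4) and (4, 8),
# which together cover all 64 pixels exactly once.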
def _create_stream(a, filter_type=None):
"""
Convert the data in `a` into a python string.
    `a` must be a 2D or 3D array of unsigned 8- or 16-bit
integers.
The string is formatted as the "scan lines" of the array.
"""
filters = [_filter0, _filter1, _filter2, _filter3, _filter4]
if filter_type is None:
filter_type = "heuristic"
allowed_filter_types = [0, 1, 2, 3, 4, "heuristic"]
if filter_type not in allowed_filter_types:
raise ValueError('filter_type must be one of %r' %
(allowed_filter_types,))
if a.ndim == 2:
# Gray scale. Add a trivial third dimension.
a = a[:, :, _np.newaxis]
lines = []
prev_row = _np.zeros_like(a[0]).view(_np.uint8)
for row in a:
# Convert the row to big-endian (i.e. network byte order).
row_be = row.astype('>' + row.dtype.str[1:]).view(_np.uint8)
if filter_type == "heuristic":
filtered_rows = [filt(row_be, prev_row) for filt in filters]
lst = [_np.abs(fr.view(_np.int8).astype(_np.int_)).sum()
for fr in filtered_rows]
values = _np.array(lst)
ftype = values.argmin()
# Create the string, with the filter type prepended.
lines.append(chr(ftype).encode('ascii') +
filtered_rows[ftype].tobytes())
else:
filtered_row = filters[filter_type](row_be, prev_row)
lines.append(chr(filter_type).encode('ascii') +
filtered_row.tobytes())
prev_row = row_be
stream = b''.join(lines)
return stream
def _write_chunk(f, chunk_type, chunk_data):
"""
Write a chunk to the file `f`. This function wraps the chunk_type and
chunk_data with the length and CRC field, and writes the result to `f`.
"""
content = chunk_type + chunk_data
length = _struct.pack("!I", len(chunk_data))
crc = _struct.pack("!I", _zlib.crc32(content) & 0xFFFFFFFF)
f.write(length + content + crc)
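# For example (illustrative only): an IEND chunk, which carries no data, is
# written as 12 bytes: the 4-byte big-endian length b'\x00\x00\x00\x00',
# the 4-byte chunk type b'IEND' and the 4-byte CRC computed over the
# type-plus-data field.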
def _write_ihdr(f, width, height, nbits, color_type, interlace):
"""Write an IHDR chunk to `f`."""
fmt = "!IIBBBBB"
chunk_data = _struct.pack(fmt, width, height, nbits, color_type, 0, 0,
interlace)
_write_chunk(f, b"IHDR", chunk_data)
def _write_text(f, keyword, text_string):
"""Write a tEXt chunk to `f`.
    keyword and text_string are expected to be bytes (not unicode).
They must already be validated.
"""
data = keyword + b'\0' + text_string
_write_chunk(f, b'tEXt', data)
def _write_time(f, timestamp):
"""Write a tIME chunk to `f`."""
chunk_data = _struct.pack('!HBBBBB', *timestamp)
_write_chunk(f, b'tIME', chunk_data)
def _write_sbit(f, sbit):
"""Write an sBIT chunk to `f`."""
chunk_data = _struct.pack('BBBB'[:len(sbit)], *sbit)
_write_chunk(f, b'sBIT', chunk_data)
def _write_gama(f, gamma):
"""Write a gAMA chunk to `f`."""
gama = int(gamma*100000 + 0.5)
chunk_data = _struct.pack('!I', gama)
_write_chunk(f, b'gAMA', chunk_data)
def _write_plte(f, palette):
_write_chunk(f, b"PLTE", palette.tobytes())
def _write_trns(f, trans):
trans_be = trans.astype('>' + trans.dtype.str[1:])
_write_chunk(f, b"tRNS", trans_be.tobytes())
def _write_bkgd(f, color, color_type):
"""
Write bKGD chunk to `f`.
* If `color_type` is 0 or 4, `color` must be an integer.
* If `color_type` is 2 or 6, `color` must be a sequence of three
integers (RGB values).
* If `color_type` is 3, `color` must be an integer that is less than
the number of colors in the palette.
"""
if color_type == 0 or color_type == 4:
chunk_data = _struct.pack("!H", color)
elif color_type == 2 or color_type == 6:
chunk_data = _struct.pack("!HHH", *color)
elif color_type == 3:
chunk_data = _struct.pack("B", color)
else:
raise ValueError("invalid chunk_type %r" % (color_type,))
_write_chunk(f, b"bKGD", chunk_data)
def _write_phys(f, phys):
"""Write a pHYs chunk to `f`."""
chunk_data = _struct.pack("!IIB", *phys)
_write_chunk(f, b"pHYs", chunk_data)
def _write_iccp(f, iccp):
"""Write a iCCP chunk to `f`."""
_validate_iccp(iccp)
profile_name = _encode_latin1(iccp[0])
compressed_profile = _zlib.compress(iccp[1])
chunk_data = profile_name + b'\0\0' + compressed_profile
_write_chunk(f, b"iCCP", chunk_data)
def _write_chrm(f, chromaticity):
"""Write a cHRM chunk to `f`."""
data = (100000 * _np.array(chromaticity) + 0.5).astype(_np.uint32)
chunk_data = _struct.pack('!IIIIIIII', *data.ravel())
_write_chunk(f, b'cHRM', chunk_data)
def _write_idat(f, data):
"""Write an IDAT chunk to `f`."""
_write_chunk(f, b"IDAT", data)
def _write_iend(f):
"""Write an IEND chunk to `f`."""
_write_chunk(f, b"IEND", b"")
def _write_actl(f, num_frames, num_plays):
"""Write an acTL chunk to `f`."""
if num_frames < 1:
raise ValueError("Attempt to create acTL chunk with num_frames (%i) "
"less than 1." % (num_frames,))
chunk_data = _struct.pack("!II", num_frames, num_plays)
_write_chunk(f, b"acTL", chunk_data)
def _write_fctl(f, sequence_number, width, height, x_offset, y_offset,
delay_num, delay_den, dispose_op=0, blend_op=0):
"""Write an fcTL chunk to `f`."""
if width < 1:
raise ValueError("width must be greater than 0")
if height < 1:
raise ValueError("heigt must be greater than 0")
if x_offset < 0:
raise ValueError("x_offset must be nonnegative")
if y_offset < 0:
raise ValueError("y_offset must be nonnegative")
fmt = "!IIIIIHHBB"
chunk_data = _struct.pack(fmt, sequence_number, width, height,
x_offset, y_offset, delay_num, delay_den,
dispose_op, blend_op)
_write_chunk(f, b"fcTL", chunk_data)
def _write_fdat(f, sequence_number, data):
"""Write an fdAT chunk to `f`."""
seq = _struct.pack("!I", sequence_number)
_write_chunk(f, b"fdAT", seq + data)
def _write_data(f, a, bitdepth, max_chunk_len=None, sequence_number=None,
filter_type=None, interlace=0):
"""
Write the image data in the array `a` to the file, using IDAT chunks
if sequence_number is None and fdAT chunks otherwise.
`f` must be a writable file object.
`a` must be a numpy array to be written to the file `f`.
If `sequence_number` is None, 'IDAT' chunks are written.
If `sequence_number` is not None, `fdAT` chunks are written.
`interlace` must be 0 or 1. It determines the interlace method.
    0 means no interlacing; 1 means Adam7 interlacing.
Returns the number of chunks written to the file.
`filter_type` is passed on to _create_stream().
"""
if interlace == 1:
passes = _interlace_passes(a)
else:
passes = [a]
if bitdepth is not None and bitdepth < 8:
passes = [_pack(aa, bitdepth) for aa in passes]
if filter_type == "auto":
filter_types = [0, 1, 2, 3, 4, "heuristic"]
else:
filter_types = [filter_type]
zstream = None
for filter_type in filter_types:
stream = b''
for a in passes:
if a.size > 0:
stream += _create_stream(a, filter_type=filter_type)
z = _zlib.compress(stream)
if zstream is None or len(z) < len(zstream):
zstream = z
# zstream is a string containing the packed, compressed version of the
# data from the array `a`. This will be written to the file in one or
# more IDAT or fdAT chunks.
if max_chunk_len is None:
# Put the whole thing in one chunk.
max_chunk_len = len(zstream)
elif max_chunk_len < 1:
raise ValueError("max_chunk_len must be at least 1.")
num_data_chunks = (len(zstream) + max_chunk_len - 1) // max_chunk_len
for k in range(num_data_chunks):
start = k * max_chunk_len
end = min(start + max_chunk_len, len(zstream))
data = zstream[start:end]
if sequence_number is None:
_write_idat(f, data)
else:
_write_fdat(f, sequence_number, data)
sequence_number += 1
return num_data_chunks
def _encode_latin1(s):
if _PY3:
unicode_type = str
else:
unicode_type = eval('unicode')
if isinstance(s, unicode_type):
s = s.encode('latin-1')
return s
def _validate_keyword(keyword, keyname='keyword'):
"""
From http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html:
The keyword must be at least one character and less than 80 characters
long. Keywords are always interpreted according to the ISO/IEC 8859-1
(Latin-1) character set [ISO/IEC-8859-1]. They must contain only
printable Latin-1 characters and spaces; that is, only character
codes 32-126 and 161-255 decimal are allowed. To reduce the chances
for human misreading of a keyword, leading and trailing spaces are
forbidden, as are consecutive spaces. Note also that the non-breaking
space (code 160) is not permitted in keywords, since it is visually
indistinguishable from an ordinary space.
"""
if not (0 < len(keyword) < 80):
raise ValueError("length of %s must greater than 0 and less "
"than 80." % (keyname,))
kw_check = all([(31 < _bord(c) < 127) or (160 < _bord(c) < 256)
for c in keyword])
if not kw_check:
raise ValueError("%s %r contains non-printable characters or "
"a non-breaking space (code 160)." %
(keyname, keyword,))
if keyword.startswith(b' '):
raise ValueError("%s %r begins with a space." % (keyname, keyword,))
if keyword.endswith(b' '):
raise ValueError("%s %r ends with a space." % (keyname, keyword,))
if b' ' in keyword:
raise ValueError("%s %r contains consecutive spaces." %
(keyname, keyword,))
def _validate_text(text_list):
if text_list is None:
text_list = []
creation_time = _encode_latin1(_time.strftime("%Y-%m-%dT%H:%M:%SZ",
_time.gmtime()))
software = _encode_latin1(_software_text())
text_list = [(_encode_latin1(keyword), _encode_latin1(text))
for keyword, text in text_list]
keywords = [keyword for keyword, text in text_list]
if b"Creation Time" not in keywords:
text_list.append((b"Creation Time", creation_time))
if b"Software" not in keywords:
text_list.append((b"Software", software))
validated_text_list = []
for keyword, text_string in text_list:
if text_string is None:
# Drop elements where the text string is None.
continue
_validate_keyword(keyword)
# Validate the text string.
if b'\0' in text_string:
raise ValueError("text_string contains a null character.")
validated_text_list.append((keyword, text_string))
return validated_text_list
def _palettize(a):
# `a` must be a numpy array with dtype `np.uint8` and shape (m, n, 3) or
# (m, n, 4).
a = _np.ascontiguousarray(a)
depth = a.shape[-1]
dt = ','.join(['u1'] * depth)
b = a.view(dt).reshape(a.shape[:-1])
colors, inv = _np.unique(b, return_inverse=True)
index = inv.astype(_np.uint8).reshape(a.shape[:-1])
# palette is the RGB values of the unique RGBA colors.
palette = colors.view(_np.uint8).reshape(-1, depth)[:, :3]
if depth == 3:
trans = None
else:
# trans is the 1-d array of alpha values of the unique RGBA colors.
# trans is the same length as `palette`.
trans = colors['f3']
return index, palette, trans
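# A minimal sketch of how _palettize is used (array values are illustrative):
#
#     a = _np.zeros((2, 2, 3), dtype=_np.uint8)
#     a[0, 0] = [255, 0, 0]
#     index, palette, trans = _palettize(a)
#
# `palette` then holds the two distinct RGB colors found in `a`, `index`
# holds, for each pixel, the position of its color in `palette`, and
# `trans` is None because there is no alpha channel.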
def _palettize_seq(seq):
""""
seq must be a sequence of 3-d numpy arrays with dtype np.uint8,
all with the same depth (i.e. the same length of the third dimension).
"""
# Call np.unique for each array in seq. Each array is viewed as a
# 2-d structured array of colors.
depth = seq[0].shape[-1]
dt = ','.join(['u1'] * depth)
result = [_np.unique(a.view(dt).reshape(a.shape[:-1]), return_inverse=True)
for a in seq]
# `sizes` is the number of unique colors found in each array.
sizes = [len(r[0]) for r in result]
# Combine all the colors found in each array to get the overall
# set of unique colors.
combined = _np.concatenate([r[0] for r in result])
colors, inv = _np.unique(combined, return_inverse=True)
offsets = _np.cumsum(_np.r_[0, sizes[:-1]])
invs = [r[1].reshape(a.shape[:2]) for r, a in zip(result, seq)]
# The sequence index_seq holds the converted arrays. The values
# in these arrays are indices into `palette`. Note that if
# len(palette) > 256, the conversion to np.uint8 will cause
# some values in the arrays in `index_seq` to wrap around.
# The caller must check the len(palette) to determine if this
# has happened.
index_seq = [inv[o:o+s][i].astype(_np.uint8)
for i, o, s in zip(invs, offsets, sizes)]
palette = colors.view(_np.uint8).reshape(-1, depth)[:, :3]
if depth == 3:
trans = None
else:
# trans is the 1-d array of alpha values of the unique RGBA colors.
# trans is the same length as `palette`.
trans = colors['f3']
return index_seq, palette, trans
def _pack(a, bitdepth):
"""
Pack the values in `a` into bitfields of a smaller array.
`a` must be a 2-d numpy array with dtype `np.uint8`
bitdepth must be either 1, 2, 4 or 8.
(bitdepth=8 is a trivial case, for which the return value is simply `a`.)
"""
if a.dtype != _np.uint8:
raise ValueError('Input array must have dtype uint8')
if a.ndim != 2:
raise ValueError('Input array must be two dimensional')
if bitdepth == 8:
return a
ncols, rembits = divmod(a.shape[1]*bitdepth, 8)
if rembits > 0:
ncols += 1
b = _np.zeros((a.shape[0], ncols), dtype=_np.uint8)
for row in range(a.shape[0]):
bcol = 0
pos = 8
for col in range(a.shape[1]):
val = (2**bitdepth - 1) & a[row, col]
pos -= bitdepth
if pos < 0:
bcol += 1
pos = 8 - bitdepth
b[row, bcol] |= (val << pos)
return b
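# A worked example of the packing above (illustrative values): with
# bitdepth=2, the row [1, 2, 3, 0] is packed into the single byte
# 0b01101100, i.e. value 1 in bits 7-6, 2 in bits 5-4, 3 in bits 3-2 and
# 0 in bits 1-0.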
def _unpack(p, bitdepth, width):
powers = _np.arange(bitdepth-1, -1, -1)
up = _np.unpackbits(p).reshape(p.shape[0], -1, bitdepth).dot(2**powers)
a = up[:, :width]
return a
def _validate_array(a):
if a.ndim != 2:
if a.ndim != 3 or a.shape[2] > 4 or a.shape[2] == 0:
raise ValueError("array must be 2D, or 3D with shape "
"(m, n, d) with 1 <= d <= 4.")
itemsize = a.dtype.itemsize
if not _np.issubdtype(a.dtype, _np.unsignedinteger) or itemsize > 2:
raise ValueError("array must be an array of 8- or 16-bit "
"unsigned integers")
# Notes on color_type:
#
# color_type meaning tRNS chunk contents (optional)
# ---------- ------------------------ --------------------------------
# 0 grayscale Single gray level value, 2 bytes
# 2 RGB Single RGB, 2 bytes per channel
# 3 8 bit indexed RGB or RGBA Series of 1 byte alpha values
# 4 Grayscale and alpha
# 6 RGBA
#
#
# from http://www.w3.org/TR/PNG/:
# Table 11.1 - Allowed combinations of colour type and bit depth
#
# Color Allowed
# PNG image type type bit depths Interpretation
# Greyscale 0 1, 2, 4, 8, 16 Each pixel is a greyscale
# sample
# Truecolour 2 8, 16 Each pixel is an RGB triple
# Indexed-colour 3 1, 2, 4, 8 Each pixel is a palette index;
# a PLTE chunk shall appear.
# Greyscale with alpha 4 8, 16 Each pixel is a greyscale
# sample followed by an alpha
# sample.
# Truecolour with alpha 6 8, 16 Each pixel is an RGB triple
# followed by an alpha sample.
def _get_color_type(a, use_palette):
if a.ndim == 2:
color_type = 0
else:
depth = a.shape[2]
if depth == 1:
# Grayscale
color_type = 0
elif depth == 2:
# Grayscale and alpha
color_type = 4
elif depth == 3:
# RGB
if a.dtype == _np.uint8 and use_palette:
# Indexed color (create a palette)
color_type = 3
else:
# RGB colors
color_type = 2
elif depth == 4:
# RGB and alpha
if a.dtype == _np.uint8 and use_palette:
color_type = 3
else:
color_type = 6
return color_type
def _validate_bitdepth(bitdepth, a, color_type):
if bitdepth not in [None, 1, 2, 4, 8, 16]:
raise ValueError('bitdepth %i is not valid. Valid values are '
'1, 2, 4, 8 or 16' % (bitdepth,))
if bitdepth is not None:
if color_type in [2, 4, 6]:
if 8*a.dtype.itemsize != bitdepth:
raise ValueError("For the given input, the bit depth must "
"match the data type of the array.")
elif color_type == 3:
if bitdepth == 16:
raise ValueError("Bit depth 16 not allowed when use_palette "
"is True.")
else:
# Given bitdepth is None
if color_type == 3:
bitdepth = 8
else:
bitdepth = 8*a.dtype.itemsize
return bitdepth
def _validate_timestamp(timestamp):
if timestamp is None:
return None
if len(timestamp) != 6:
raise ValueError("timestamp must have length 6")
return timestamp
def _validate_phys(phys):
if phys is not None:
if len(phys) == 2:
phys = tuple(phys) + (0,)
elif phys[2] not in [0, 1]:
raise ValueError('Third element of `phys` must be 0 or 1.')
phys = [int(x) for x in phys]
if phys[0] <= 0 or phys[1] <= 0:
raise ValueError('The pixels per unit in `phys` must be positive.')
return phys
def _validate_iccp(iccp):
if iccp is not None:
if len(iccp) != 2:
raise ValueError('`iccp` must have two elements.')
if type(iccp[0]) != str:
raise ValueError('First element of `iccp` must be str.')
try:
profile_name = _encode_latin1(iccp[0])
except UnicodeEncodeError:
raise ValueError('The profile name (the first element of `iccp`) '
'must be encodable as Latin-1.')
_validate_keyword(profile_name, 'profile name')
if type(iccp[1]) != bytes:
raise ValueError('Second element of `iccp` must be bytes.')
return iccp
def _validate_chromaticity(chromaticity):
if chromaticity is not None:
if len(chromaticity) != 4:
raise ValueError('chromaticity must be a sequence with length 4.')
for pair in chromaticity:
if len(pair) != 2:
raise ValueError('each item in chromaticity must be a '
'sequence of length 2.')
if not ((0 <= pair[0] <= 1) and (0 <= pair[1] <= 1)):
raise ValueError('each value in chromaticity must be between '
'0 and 1.')
return chromaticity
def _validate_sbit(sbit, color_type, bitdepth):
try:
len_sbit = len(sbit)
except TypeError:
sbit = (sbit,)
len_sbit = 1
try:
[operator.index(n) for n in sbit]
except Exception:
raise ValueError('Each value in sbit must be an integer.')
# Mapping from color_type to required length of sbit:
required_length = {0: 1, 2: 3, 3: 3, 4: 2, 6: 4}
if len_sbit != required_length[color_type]:
raise ValueError('For color type %d, len(sbit) must be %d' %
(color_type, required_length[color_type]))
for n in sbit:
if n < 1 or n > bitdepth:
raise ValueError('Each value in sbit must be greater than 0 '
'and less than or equal to the bit depth %s'
% bitdepth)
return sbit
def _common_validation(interlace, filter_type, phys, text_list, timestamp,
chromaticity):
if interlace not in [0, 1]:
raise ValueError('interlace must be 0 or 1.')
if filter_type is None:
filter_type = "auto"
phys = _validate_phys(phys)
text_list = _validate_text(text_list)
timestamp = _validate_timestamp(timestamp)
chromaticity = _validate_chromaticity(chromaticity)
return filter_type, phys, text_list, timestamp, chromaticity
def _add_background_color(background, palette, trans, bitdepth):
if len(background) != 3:
raise ValueError("background must have length 3 when "
"use_palette is True.")
index = _np.where((palette == background).all(axis=-1))[0]
if index.size > 0:
# The given background color is in the palette.
background = index[0]
else:
# The given background color is *not* in the palette. Is there
# room for one more color?
if bitdepth is None:
bitdepth = 8
if len(palette) == 2**bitdepth:
msg = ("The array already has the maximum of %i colors, and a "
"background color that is not in the array has been given. "
"With a bitdepth of %i, no more than %i colors are allowed "
"when using a palette." % (2**bitdepth, bitdepth,
2**bitdepth))
raise ValueError(msg)
else:
index = len(palette)
palette = _np.append(palette,
_np.array([background],
dtype=_np.uint8),
axis=0)
if trans is not None:
trans = _np.append(trans, [_np.uint8(255)])
background = index
return background, palette, trans
def _write_header_and_meta(f, dtype, shape, color_type, bitdepth, palette,
interlace, text_list, timestamp, sbit, gamma, iccp,
chromaticity, trans, background, phys):
# Write the PNG header.
f.write(b"\x89PNG\x0D\x0A\x1A\x0A")
# Write the chunks...
# IHDR chunk
if bitdepth is not None:
nbits = bitdepth
else:
nbits = dtype.itemsize*8
_write_ihdr(f, shape[1], shape[0], nbits, color_type, interlace)
# tEXt chunks, if any.
if text_list is not None:
for keyword, text_string in text_list:
_write_text(f, keyword, text_string)
if timestamp is not None:
_write_time(f, timestamp)
    # sBIT must precede PLTE (if present) and the first IDAT chunk.
if sbit is not None:
_write_sbit(f, sbit)
if gamma is not None:
_write_gama(f, gamma)
# iCCP chunk, if `iccp` was given.
if iccp is not None:
_write_iccp(f, iccp)
# cHRM chunk, if `chromaticity` was given.
if chromaticity is not None:
_write_chrm(f, chromaticity)
# PLTE chunk, if requested.
if color_type == 3:
_write_plte(f, palette)
# tRNS chunk, if there is one.
if trans is not None:
_write_trns(f, trans)
# bKGD chunk, if there is one.
if background is not None:
_write_bkgd(f, background, color_type)
# pHYs chunk, if `phys` was given.
if phys is not None:
_write_phys(f, phys)
def write_png(fileobj, a, text_list=None, use_palette=False,
transparent=None, bitdepth=None, max_chunk_len=None,
timestamp=None, gamma=None, background=None,
filter_type=None, interlace=0, phys=None, iccp=None,
chromaticity=None, sbit=None):
"""
Write a numpy array to a PNG file.
Parameters
----------
fileobj : string or file object
If fileobj is a string, it is the name of the PNG file to be created.
Otherwise fileobj must be a file opened for writing.
a : numpy array
Must be an array of 8- or 16-bit unsigned integers. The shape of `a`
must be (m, n) or (m, n, d) with 1 <= d <= 4.
text_list : list of (keyword, text) tuples, optional
Each tuple is written to the file as a 'tEXt' chunk. See the Notes
for more information about text in PNG files.
use_palette : bool, optional
If True, *and* the data type of `a` is `numpy.uint8`, *and* the size
of `a` is (m, n, 3), then a PLTE chunk is created and an indexed color
image is created. (If the conditions on `a` are not true, this
argument is ignored and a palette is not created.) There must not be
more than 2**bitdepth distinct colors in `a`. If the conditions on `a`
        are true but the array has more than 2**bitdepth distinct colors,
        a ValueError exception is raised.
transparent : integer or 3-tuple of integers (r, g, b), optional
If the colors in `a` do not include an alpha channel (i.e. the shape
of `a` is (m, n), (m, n, 1) or (m, n, 3)), the `transparent` argument
can be used to specify a single color that is to be considered the
transparent color. This argument is ignored if `a` includes an
alpha channel, or if `use_palette` is True and the `transparent`
color is not in `a`. Otherwise, a 'tRNS' chunk is included in the
PNG file.
bitdepth : integer, optional
Bit depth of the output image. Valid values are 1, 2, 4 and 8.
Only valid for grayscale images with no alpha channel with an input
array having dtype numpy.uint8. If not given, the bit depth is
inferred from the data type of the input array `a`.
max_chunk_len : integer, optional
The data in a PNG file is stored in records called IDAT chunks.
`max_chunk_len` sets the maximum number of data bytes to stored in
each IDAT chunk. The default is None, which means that all the data
is written to a single IDAT chunk.
timestamp : tuple with length 6, optional
If this argument is not None, a 'tIME' chunk is included in the
PNG file. The value must be a tuple of six integers: (year, month,
day, hour, minute, second).
gamma : float, optional
If this argument is not None, a 'gAMA' chunk is included in the
PNG file. The argument is expected to be a floating point value.
The value written in the 'gAMA' chunk is int(gamma*100000 + 0.5).
background : int (for grayscale) or sequence of three ints (for RGB)
Set the default background color. When this option is used, a
'bKGD' chunk is included in the PNG file. When `use_palette`
is True, and `background` is not one of the colors in `a`, the
`background` color is included in the palette, and so it counts
towards the maximum number of 256 colors allowed in a palette.
filter_type : one of 0, 1, 2, 3, 4, "heuristic" or "auto", optional
Controls the filter type that is used per scanline in the IDAT
chunks. The default is "auto", which means the output data is
        generated six times, once for each of the other possible filter
types, and the filter that generates the smallest output is used.
interlace : either 0 or 1
Interlace method to use. 0 means no interlace; 1 means Adam7.
phys : tuple with length 2 or 3, optional
If given, a `pHYs` chunk is written to the PNG file.
If `phys` is given, it must be a tuple of integers with length 2
or 3. The first two integers are the pixels per unit of the X
and Y axes, respectively. The third value, if given, must be 0
or 1. If the value is 1, the units of the first two values are
pixels per *meter*. If the third value is 0 (or not given),
the units of the first two values are undefined. In that case,
the values define the pixel aspect ratio only.
iccp : tuple with length 2, optional
ICCP color profile. If given, the argument must be a tuple of length 2.
The first element must be a string containing the profile name. The
profile name is subject to the same restrictions as the keywords in the
text_list argument; see the Notes for more information about these
restrictions. The second element is the profile data, and it must be a
bytes object. This data is not validated. It is written "as is" to
the PNG file.
chromaticity : array-like, optional
The four chromaticity values: white point, red, green and blue.
If given, the value must be a sequence of length four containing pairs
(x, y) of chromaticity values. The values must be floating point
in the interval [0, 1].
sbit : sequence of 1, 2, 3, or 4 integers, optional
If given, the value(s) are written in an `sBIT` chunk in the PNG
file. The values indicate the original number of significant bits
in each color (and alpha, if applicable) channel. The values must be
compatible with the color type and bit depth of the image data.
Notes
-----
If `a` is three dimensional (i.e. `a.ndim == 3`), the size of the last
dimension determines how the values in the last dimension are interpreted,
as follows:
a.shape[2] Interpretation
---------- --------------------
1 grayscale
2 grayscale and alpha
3 RGB
4 RGB and alpha
    The `text_list` argument accepts a list of tuples of two strings.
The first item in each tuple is the *keyword*, and the second is the text
string. This argument allows `'tEXt'` chunks to be created. The
following is from the PNG specification::
The keyword indicates the type of information represented by the
text string. The following keywords are predefined and should be
used where appropriate:
Title Short (one line) title or caption for image
Author Name of image's creator
Description Description of image (possibly long)
Copyright Copyright notice
Creation Time Time of original image creation
Software Software used to create the image
Disclaimer Legal disclaimer
Warning Warning of nature of content
Source Device used to create the image
Comment Miscellaneous comment; conversion from GIF comment
Both keyword and text are interpreted according to the ISO 8859-1
(Latin-1) character set [ISO-8859]. The text string can contain any
Latin-1 character. Newlines in the text string should be represented
by a single linefeed character (decimal 10); use of other control
characters in the text is discouraged.
Keywords must contain only printable Latin-1 characters and spaces;
that is, only character codes 32-126 and 161-255 decimal are allowed.
To reduce the chances for human misreading of a keyword, leading and
trailing spaces are forbidden, as are consecutive spaces. Note also
that the non-breaking space (code 160) is not permitted in keywords,
since it is visually indistinguishable from an ordinary space.
"""
filter_type, phys, text_list, timestamp, chromaticity = _common_validation(
interlace, filter_type, phys, text_list, timestamp, chromaticity
)
a = _np.ascontiguousarray(a)
_validate_array(a)
color_type = _get_color_type(a, use_palette)
palette = None
trans = None
if color_type == 3:
# The array is 8 bit RGB or RGBA, and a palette is to be created.
# Note that this replaces `a` with the index array.
a, palette, trans = _palettize(a)
# `a` has the same shape as before, but now it is an array of indices
# into the array `palette`, which contains the colors. `trans` is
# either None (if there was no alpha channel), or an array the same
# length as `palette` containing the alpha values of the colors.
bd = bitdepth if bitdepth is not None else 8
max_num_colors = 2**bd
if len(palette) > max_num_colors:
raise ValueError("The array has %i colors. With bit depth %i, "
"no more than %i colors are allowed when using "
"a palette." %
(len(palette), bd, max_num_colors))
if background is not None:
# A default background color has been given, and we're creating
# an indexed palette (use_palette is True). Convert the given
# background color to an index. If the color is not in the
# palette, extend the palette with the new color (or raise an
# error if there are already 2**bitdepth colors).
background, palette, trans = _add_background_color(background,
palette, trans,
bitdepth)
if trans is None and transparent is not None:
# The array does not have an alpha channel. The caller has given
# a color value that should be considered to be transparent.
# We construct an array `trans` of alpha values, and set the
# alpha of the color that is to be transparent to 0. All other
# alpha values are set to 255 (fully opaque).
# `trans` only has entries for colors in the palette up to the
# given `transparent` color, so `trans` is not the same length as
# `palette` (unless the transparent color happens to be the last
# color in the palette).
pal_index = _np.nonzero((palette == transparent).all(axis=1))[0]
if pal_index.size > 0:
if pal_index.size > 1:
raise ValueError("Only one transparent color may "
"be given.")
trans = _np.zeros(pal_index[0]+1, dtype=_np.uint8)
trans[:-1] = 255
elif (color_type == 0 or color_type == 2) and transparent is not None:
# XXX Should do some validation of `transparent`...
trans = _np.asarray(transparent, dtype=_np.uint16)
bitdepth = _validate_bitdepth(bitdepth, a, color_type)
# Now that we have the color_type and bit-depth, we can validate the
# sbit argument, if one was given.
if sbit is not None:
sbit = _validate_sbit(sbit, color_type, bitdepth)
if hasattr(fileobj, 'write'):
# Assume it is a file-like object with a write method.
f = fileobj
else:
# Assume it is a filename.
f = open(fileobj, "wb")
_write_header_and_meta(f, a.dtype, a.shape, color_type, bitdepth, palette,
interlace, text_list, timestamp, sbit, gamma, iccp,
chromaticity, trans, background, phys)
# _write_data(...) writes the IDAT chunk(s).
_write_data(f, a, bitdepth, max_chunk_len=max_chunk_len,
filter_type=filter_type, interlace=interlace)
# IEND chunk
_write_iend(f)
if f != fileobj:
f.close()
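# A minimal usage sketch for write_png (the file name and array contents
# below are illustrative only):
#
#     import numpy as np
#     from numpngw import write_png
#
#     img = np.zeros((64, 96, 3), dtype=np.uint8)
#     img[:, :48, 0] = 255    # left half red
#     img[:, 48:, 2] = 255    # right half blue
#     write_png('example.png', img)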
def _msec_to_numden(delay):
"""
delay is the time delay in milliseconds.
Return value is the tuple (delay_num, delay_den) representing
the delay in seconds as the fraction delay_num/delay_den.
Each value in the tuple is an integer less than 65536.
"""
if delay == 0:
return (0, 1)
# Convert delay to seconds.
delay_sec = delay/1000.0
if delay_sec > 1:
f = _Fraction.from_float(1.0/delay_sec).limit_denominator(65535)
num = f.denominator
den = f.numerator
else:
f = _Fraction.from_float(delay_sec).limit_denominator(65535)
num = f.numerator
den = f.denominator
if (num, den) == (1, 0):
raise ValueError("delay=%r is too large to convert to "
"delay_num/delay_den" % (delay,))
if (num, den) == (0, 1):
raise ValueError("delay=%r is too small to convert to "
"delay_num/delay_den" % (delay,))
return num, den
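# For example (illustrative): a delay of 40 ms is returned as (1, 25),
# i.e. 1/25 of a second, and a delay of 2000 ms is returned as (2, 1),
# i.e. 2 seconds.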
def write_apng(fileobj, seq, delay=None, num_plays=0, default_image=None,
offset=None,
text_list=None, use_palette=False,
transparent=None, bitdepth=None,
max_chunk_len=None, timestamp=None, gamma=None,
background=None, filter_type=None, interlace=0, phys=None,
iccp=None, chromaticity=None, sbit=None):
"""
Write an APNG file from a sequence of numpy arrays.
Warning:
* This API is experimental, and will likely change.
* The function has not been thoroughly tested.
Parameters
----------
seq : sequence of numpy arrays
All the arrays must have the same shape and dtype.
delay : scalar or sequence of scalars, optional
The time duration that each frame is displayed, in milliseconds.
If `delay` is None (the default) or 0, the frames are played as
fast as possible. If delay is an integer, each frame is displayed
for the same duration. If `delay` is a sequence, it must have the
same length as `seq`.
num_plays : int
The number of times to repeat the animation. If 0, the animation
is repeated indefinitely.
default_image : numpy array
If this image is given, it is the image that is displayed by renderers
that do not support animated PNG files. If the renderer does support
animation, this image is not shown. If this argument is not given,
the image shown by renderers that do not support animation will be
`seq[0]`.
offset : sequence of tuples each with length 2, optional
If given, this must be a sequence of the form
[(row_offset0, col_offset0), (row_offset1, col_offset1), ...]
The length of the sequence must be the same as `seq`. It defines
the location of the image within the PNG output buffer.
text_list : list of (keyword, text) tuples, optional
Each tuple is written to the file as a 'tEXt' chunk.
use_palette : bool, optional
If True, *and* the data type of the arrays in `seq` is `numpy.uint8`,
*and* the size of each array is (m, n, 3), then a PLTE chunk is
created and an indexed color image is created. (If the conditions
on the arrays are not true, this argument is ignored and a palette
is not created.) There must not be more than 256 distinct colors in
the arrays. If the above conditions are true but the arrays have
more than 256 colors, a ValueError exception is raised.
transparent : integer or 3-tuple of integers (r, g, b), optional
If the colors in the input arrays do not include an alpha channel
(i.e. the shape of each array is (m, n), (m, n, 1) or (m, n, 3)),
the `transparent` argument can be used to specify a single color that
is to be considered the transparent color. This argument is ignored
if the arrays have an alpha channel.
bitdepth : integer, optional
Bit depth of the output image. Valid values are 1, 2, 4 and 8.
Only valid for grayscale images with no alpha channel with an input
array having dtype numpy.uint8. If not given, the bit depth is
inferred from the data type of the input arrays.
max_chunk_len : integer, optional
        The data in an APNG file is stored in records called IDAT and fdAT
chunks. `max_chunk_len` sets the maximum number of data bytes to
stored in each chunk. The default is None, which means that all the
data from a frame is written to a single IDAT or fdAT chunk.
timestamp : tuple with length 6, optional
If this argument is not None, a 'tIME' chunk is included in the
PNG file. The value must be a tuple of six integers: (year, month,
day, hour, minute, second).
gamma : float, optional
If this argument is not None, a 'gAMA' chunk is included in the
PNG file. The argument is expected to be a floating point value.
The value written in the 'gAMA' chunk is int(gamma*100000 + 0.5).
background : int (for grayscale) or sequence of three ints (for RGB)
Set the default background color. When this option is used, a
'bKGD' chunk is included in the PNG file. When `use_palette`
is True, and `background` is not one of the colors in `a`, the
`background` color is included in the palette, and so it counts
towards the maximum number of 256 colors allowed in a palette.
filter_type : one of 0, 1, 2, 3, 4, "heuristic" or "auto", optional
Controls the filter type that is used per scanline in the IDAT
chunks. The default is "auto", which means the output data for
        each frame is generated six times, once for each of the other
possible filter types, and the filter that generates the smallest
output is used.
interlace : either 0 or 1
Interlace method to use. 0 means no interlace; 1 means Adam7.
phys : tuple with length 2 or 3, optional
If given, a `pHYs` chunk is written to the PNG file.
If `phys` is given, it must be a tuple of integers with length 2
or 3. The first two integers are the pixels per unit of the X
and Y axes, respectively. The third value, if given, must be 0
or 1. If the value is 1, the units of the first two values are
pixels per *meter*. If the third value is 0 (or not given),
the units of the first two values are undefined. In that case,
the values define the pixel aspect ratio only.
iccp : tuple with length 2, optional
ICCP color profile. If given, the argument must be a tuple of length 2.
The first element must be a string containing the profile name. The
profile name is subject to the same restrictions as the keywords in the
text_list argument; see the Notes for more information about these
restrictions. The second element is the profile data, and it must be a
bytes object. This data is not validated. It is written "as is" to
the PNG file.
chromaticity : array-like, optional
The four chromaticity values: white point, red, green and blue.
If given, the value must be a sequence of length four containing pairs
(x, y) of chromaticity values. The values must be floating point
in the interval [0, 1].
sbit : sequence of 1, 2, 3, or 4 integers, optional
If given, the value(s) are written in an `sBIT` chunk in the PNG
file. The values indicate the original number of significant bits
in each color (and alpha, if applicable) channel. The values must be
compatible with the color type and bit depth of the image data.
Notes
-----
See the `write_png` docstring for additional details about some
of the arguments.
"""
filter_type, phys, text_list, timestamp, chromaticity = _common_validation(
interlace, filter_type, phys, text_list, timestamp, chromaticity
)
num_frames = len(seq)
if num_frames == 0:
raise ValueError("no frames given in `seq`")
if delay is None:
delay = [0] * num_frames
else:
try:
len(delay)
except TypeError:
delay = [delay] * num_frames
if len(delay) != num_frames:
raise ValueError('len(delay) must be the same as len(seq)')
# Validate seq
if type(seq) == _np.ndarray:
# seq is a single numpy array containing the frames.
seq = _np.ascontiguousarray(seq)
_validate_array(seq[0])
else:
# seq is not a numpy array, so it must be a sequence of numpy arrays,
# all with the same dtype.
for a in seq:
_validate_array(a)
if any(a.dtype != seq[0].dtype for a in seq[1:]):
raise ValueError("all arrays in `seq` must have the same dtype.")
seq = [_np.ascontiguousarray(a) for a in seq]
if offset is not None:
if len(offset) != len(seq):
raise ValueError('length of offset sequence must equal len(seq)')
else:
offset = [(0, 0)] * num_frames
# Overall width and height.
width = max(a.shape[1] + offset[k][1] for k, a in enumerate(seq))
height = max(a.shape[0] + offset[k][0] for k, a in enumerate(seq))
# Validate default_image
if default_image is not None:
_validate_array(default_image)
if default_image.dtype != seq[0].dtype:
raise ValueError('default_image must have the same data type as '
'the arrays in seq')
if default_image.shape[0] > height or default_image.shape[1] > width:
raise ValueError("The default image has shape (%i, %i), which "
"exceeds the overall image size implied by `seq` "
"and `offset`, which is (%i, %i)" %
(default_image.shape[:2] + (height, width)))
color_type = _get_color_type(seq[0], use_palette)
palette = None
trans = None
if color_type == 3:
# The arrays are 8 bit RGB or RGBA, and a palette is to be created.
if default_image is None:
seq, palette, trans = _palettize_seq(seq)
else:
tmp = [default_image] + [a for a in seq]
index_tmp, palette, trans = _palettize_seq(tmp)
default_image = index_tmp[0]
seq = index_tmp[1:]
        # seq and default_image have the same shapes as before, but now
        # the arrays hold indices into the array `palette`, which contains
# the colors. `trans` is either None (if there was no alpha channel),
# or an array containing the alpha values of the colors.
bd = bitdepth if bitdepth is not None else 8
max_num_colors = 2**bd
if len(palette) > max_num_colors:
raise ValueError("The arrays have a total of %i colors. "
"With bit depth %i, no more than %i colors are "
"allowed when using a palette." %
(len(palette), bd, max_num_colors))
if background is not None:
# A default background color has been given, and we're creating
# an indexed palette (use_palette is True). Convert the given
# background color to an index. If the color is not in the
# palette, extend the palette with the new color (or raise an
# error if there are already 256 colors).
background, palette, trans = _add_background_color(background,
palette, trans,
bitdepth)
if trans is None and transparent is not None:
# The array does not have an alpha channel. The caller has given
# a color value that should be considered to be transparent.
pal_index = _np.nonzero((palette == transparent).all(axis=1))[0]
if pal_index.size > 0:
if pal_index.size > 1:
raise ValueError("Only one transparent color may "
"be given.")
trans = _np.zeros(pal_index[0]+1, dtype=_np.uint8)
trans[:-1] = 255
elif (color_type == 0 or color_type == 2) and transparent is not None:
# XXX Should do some validation of `transparent`...
trans = _np.asarray(transparent, dtype=_np.uint16)
if bitdepth == 8 and seq[0].dtype == _np.uint8:
bitdepth = None
bitdepth = _validate_bitdepth(bitdepth, seq[0], color_type)
# Now that we have the color_type and bit-depth, we can validate the
# sbit argument, if one was given.
if sbit is not None:
sbit = _validate_sbit(sbit, color_type, bitdepth)
# --- Open and write the file ---------
if hasattr(fileobj, 'write'):
# Assume it is a file-like object with a write method.
f = fileobj
else:
# Assume it is a filename.
f = open(fileobj, "wb")
_write_header_and_meta(f, seq[0].dtype, (height, width), color_type,
bitdepth, palette, interlace, text_list, timestamp,
sbit, gamma, iccp, chromaticity, trans, background,
phys)
# acTL chunk
_write_actl(f, num_frames, num_plays)
# Convert delays (which are in milliseconds) to the number of
# seconds expressed as the fraction delay_num/delay_den.
delay_num, delay_den = zip(*[_msec_to_numden(d) for d in delay])
sequence_number = 0
frame_number = 0
if default_image is None:
# fcTL chunk for the first frame
_write_fctl(f, sequence_number=sequence_number,
width=seq[0].shape[1], height=seq[0].shape[0],
x_offset=0, y_offset=0,
delay_num=delay_num[frame_number],
delay_den=delay_den[frame_number],
dispose_op=0, blend_op=0)
sequence_number += 1
frame_number += 1
# IDAT chunk(s) for the first frame (no sequence_number)
_write_data(f, seq[0], bitdepth, max_chunk_len=max_chunk_len,
filter_type=filter_type, interlace=interlace)
seq = seq[1:]
else:
# IDAT chunk(s) for the default image
_write_data(f, default_image, bitdepth, max_chunk_len=max_chunk_len,
filter_type=filter_type, interlace=interlace)
for frame in seq:
# fcTL chunk for the next frame
_write_fctl(f, sequence_number=sequence_number,
width=frame.shape[1], height=frame.shape[0],
x_offset=offset[frame_number][1],
y_offset=offset[frame_number][0],
delay_num=delay_num[frame_number],
delay_den=delay_den[frame_number],
dispose_op=0, blend_op=0)
sequence_number += 1
frame_number += 1
# fdAT chunk(s) for the next frame
num_chunks = _write_data(f, frame, bitdepth,
max_chunk_len=max_chunk_len,
sequence_number=sequence_number,
filter_type=filter_type, interlace=interlace)
sequence_number += num_chunks
# IEND chunk
_write_iend(f)
if f != fileobj:
f.close()
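# A minimal usage sketch for write_apng (file name, frame shape and delay
# below are illustrative only):
#
#     import numpy as np
#     from numpngw import write_apng
#
#     frames = []
#     for k in range(8):
#         a = np.zeros((48, 48, 3), dtype=np.uint8)
#         a[:, :6*(k + 1), 1] = 255    # a green bar that grows each frame
#         frames.append(a)
#     write_apng('example_anim.png', frames, delay=250, num_plays=0)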
def _finddiff(img1, img2):
"""
Finds the bounds of the region where img1 and img2 differ.
img1 and img2 must be 2D or 3D numpy arrays with the same shape.
"""
if img1.shape != img2.shape:
raise ValueError('img1 and img2 must have the same shape')
if img1.ndim == 2:
mask = img1 != img2
else:
mask = _np.any(img1 != img2, axis=-1)
if _np.any(mask):
rows, cols = _np.where(mask)
row_range = rows.min(), rows.max() + 1
col_range = cols.min(), cols.max() + 1
else:
row_range = None
col_range = None
return row_range, col_range
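# For example (illustrative): if two 100x100 frames differ only inside the
# block [10:20, 30:40], the bounds returned are ((10, 20), (30, 40)); if
# the frames are identical, (None, None) is returned.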
class AnimatedPNGWriter(object):
"""
This class implements the interface required by the matplotlib class
`matplotlib.animation.MovieWriter`. An instance of this class may be
used as the `writer` argument of `matplotlib.animation.Animation.save()`.
This class is experimental. It may change without warning in the next
release.
"""
# I haven't tried to determine all the additional arguments that
# should be given in __init__, and I haven't checked what should
# be pulled from rcParams if a corresponding argument is not given.
def __init__(self, fps, filter_type=None):
self.fps = fps
# Convert frames-per-second to delay between frames in milliseconds.
self._delay = 1000/fps
self._filter_type = filter_type
def setup(self, fig, outfile, dpi, *args):
self.fig = fig
self.outfile = outfile
self.dpi = dpi
self._frames = []
self._prev_frame = None
def grab_frame(self, **savefig_kwargs):
img_io = _BytesIO()
self.fig.savefig(img_io, format='rgba', dpi=self.dpi, **savefig_kwargs)
raw = img_io.getvalue()
# A bit of experimentation suggested that taking the integer part of
# the following products is the correct conversion, but I haven't
# verified it in the matplotlib code. If this is not the correct
# conversion, the call of the reshape method after calling frombuffer
# will likely raise an exception.
height = int(self.fig.get_figheight() * self.dpi)
width = int(self.fig.get_figwidth() * self.dpi)
a = _np.frombuffer(raw, dtype=_np.uint8).reshape(height, width, 4)
if self._prev_frame is None:
self._frames.append((a, (0, 0), self._delay))
else:
rows, cols = _finddiff(a, self._prev_frame)
if rows is None:
# No difference, so just increment the delay of the previous
# frame.
img, offset, delay = self._frames[-1]
self._frames[-1] = (img, offset, delay + self._delay)
else:
# b is the rectangular region that contains the part
# of the image that changed.
b = a[rows[0]:rows[1], cols[0]:cols[1]]
offset = (rows[0], cols[0])
self._frames.append((b, offset, self._delay))
self._prev_frame = a
def finish(self):
for img, offset, delay in self._frames:
if not _np.all(img[:, :, 3] == 255):
break
else:
# All the alpha values are 255, so drop the alpha channel.
self._frames = [(img[:, :, :3], offset, delay)
for img, offset, delay in self._frames]
imgs, offsets, delays = zip(*self._frames)
write_apng(self.outfile, imgs, offset=offsets, delay=delays,
filter_type=self._filter_type)
@_contextlib.contextmanager
def saving(self, fig, outfile, dpi, *args):
"""
Context manager to facilitate writing the movie file.
All arguments are passed to `setup()`.
"""
self.setup(fig, outfile, dpi, *args)
yield
self.finish()
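# A minimal sketch of using AnimatedPNGWriter with a matplotlib animation
# (the figure contents, frame count and fps below are illustrative only):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib.animation import FuncAnimation
#     from numpngw import AnimatedPNGWriter
#
#     fig, ax = plt.subplots()
#     ax.set_xlim(0, 2*np.pi)
#     ax.set_ylim(-1.1, 1.1)
#     line, = ax.plot([], [])
#     x = np.linspace(0, 2*np.pi, 100)
#
#     def update(frame):
#         line.set_data(x, np.sin(x + frame/10.0))
#         return (line,)
#
#     ani = FuncAnimation(fig, update, frames=50)
#     ani.save('example_anim.png', dpi=50,
#              writer=AnimatedPNGWriter(fps=20))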
| bsd-2-clause |
beiko-lab/gengis | bin/Lib/site-packages/matplotlib/tri/trirefine.py | 4 | 14296 | """
Mesh refinement for triangular grids.
"""
from __future__ import print_function
import numpy as np
from matplotlib.tri.triangulation import Triangulation
import matplotlib.tri.triinterpolate
class TriRefiner(object):
"""
Abstract base class for classes implementing mesh refinement.
A TriRefiner encapsulates a Triangulation object and provides tools for
mesh refinement and interpolation.
    Derived classes must implement:
- ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
the optional keyword arguments *kwargs* are defined in each
TriRefiner concrete implementation, and which returns :
- a refined triangulation
- optionally (depending on *return_tri_index*), for each
point of the refined triangulation: the index of
the initial triangulation triangle to which it belongs.
- ``refine_field(z, triinterpolator=None, **kwargs)`` , where:
- *z* array of field values (to refine) defined at the base
triangulation nodes
- *triinterpolator* is a
:class:`~matplotlib.tri.TriInterpolator` (optional)
- the other optional keyword arguments *kwargs* are defined in
each TriRefiner concrete implementation
and which returns (as a tuple) a refined triangular mesh and the
interpolated values of the field at the refined triangulation nodes.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
class UniformTriRefiner(TriRefiner):
"""
Uniform mesh refinement by recursive subdivisions.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation`
The encapsulated triangulation (to be refined)
"""
# See Also
# --------
# :class:`~matplotlib.tri.CubicTriInterpolator` and
# :class:`~matplotlib.tri.TriAnalyzer`.
# """
def __init__(self, triangulation):
TriRefiner.__init__(self, triangulation)
def refine_triangulation(self, return_tri_index=False, subdiv=3):
"""
        Computes a uniformly refined triangulation *refi_triangulation* of
the encapsulated :attr:`triangulation`.
This function refines the encapsulated triangulation by splitting each
father triangle into 4 child sub-triangles built on the edges midside
nodes, recursively (level of recursion *subdiv*).
In the end, each triangle is hence divided into ``4**subdiv``
child triangles.
The default value for *subdiv* is 3 resulting in 64 refined
subtriangles for each triangle of the initial triangulation.
Parameters
----------
return_tri_index : boolean, optional
Boolean indicating whether an index table indicating the father
triangle index of each point will be returned. Default value
False.
subdiv : integer, optional
            Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_triangulation : :class:`~matplotlib.tri.Triangulation`
The returned refined triangulation
found_index : array-like of integers
            Index, in the initial triangulation, of the triangle containing
            each point of *refi_triangulation*.
Returned only if *return_tri_index* is set to True.
"""
refi_triangulation = self._triangulation
ntri = refi_triangulation.triangles.shape[0]
# Computes the triangulation ancestors numbers in the reference
# triangulation.
ancestors = np.arange(ntri, dtype=np.int32)
for _ in range(subdiv):
refi_triangulation, ancestors = self._refine_triangulation_once(
refi_triangulation, ancestors)
refi_npts = refi_triangulation.x.shape[0]
refi_triangles = refi_triangulation.triangles
# Now we compute found_index table if needed
if return_tri_index:
# We have to initialize found_index with -1 because some nodes
# may very well belong to no triangle at all, e.g., in case of
# Delaunay Triangulation with DuplicatePointWarning.
found_index = - np.ones(refi_npts, dtype=np.int32)
tri_mask = self._triangulation.mask
if tri_mask is None:
found_index[refi_triangles] = np.repeat(ancestors, 3)
else:
                # There is a subtlety here: whenever possible we want to
                # avoid recording a masked triangle as the containing
                # triangle of a refined point (which would result in
                # artifacts in plots).
# So we impose the numbering from masked ancestors first,
# then overwrite it with unmasked ancestor numbers.
ancestor_mask = tri_mask[ancestors]
found_index[refi_triangles[ancestor_mask, :]
] = np.repeat(ancestors[ancestor_mask], 3)
found_index[refi_triangles[~ancestor_mask, :]
] = np.repeat(ancestors[~ancestor_mask], 3)
return refi_triangulation, found_index
else:
return refi_triangulation
def refine_field(self, z, triinterpolator=None, subdiv=3):
"""
Refines a field defined on the encapsulated triangulation.
Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
values of the field at the node of the refined triangulation).
Parameters
----------
z : 1d-array-like of length ``n_points``
Values of the field to refine, defined at the nodes of the
encapsulated triangulation. (``n_points`` is the number of points
in the initial triangulation)
triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
Interpolator used for field interpolation. If not specified,
a :class:`~matplotlib.tri.CubicTriInterpolator` will
be used.
subdiv : integer, optional
Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_tri : :class:`~matplotlib.tri.Triangulation` object
The returned refined triangulation
refi_z : 1d array of length: *refi_tri* node count.
The returned interpolated field (at *refi_tri* nodes)
Examples
--------
The main application of this method is to plot high-quality
iso-contours on a coarse triangular grid (e.g., triangulation built
from relatively sparse test data):
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py
"""
if triinterpolator is None:
interp = matplotlib.tri.CubicTriInterpolator(
self._triangulation, z)
else:
if not isinstance(triinterpolator,
matplotlib.tri.TriInterpolator):
raise ValueError("Expected a TriInterpolator object")
interp = triinterpolator
refi_tri, found_index = self.refine_triangulation(
subdiv=subdiv, return_tri_index=True)
refi_z = interp._interpolate_multikeys(
refi_tri.x, refi_tri.y, tri_index=found_index)[0]
return refi_tri, refi_z
@staticmethod
def _refine_triangulation_once(triangulation, ancestors=None):
"""
        This function refines a matplotlib.tri *triangulation* by splitting
        each triangle into 4 child triangles built on the edge midside
        nodes.
        The masked triangles, if present, are also split, but their children
        are returned masked.
        If *ancestors* is not provided, returns only a new triangulation:
        child_triangulation.
        If the array-like key table *ancestors* is given, it shall be of shape
        (ntri,) where ntri is the number of *triangulation* triangles.
        In this case, the function returns
        (child_triangulation, child_ancestors)
        child_ancestors is defined so that the 4 child triangles share
        the same index as their father: child_ancestors.shape = (4 * ntri,).
"""
x = triangulation.x
y = triangulation.y
# According to tri.triangulation doc:
# neighbors[i,j] is the triangle that is the neighbor
# to the edge from point index masked_triangles[i,j] to point
# index masked_triangles[i,(j+1)%3].
neighbors = triangulation.neighbors
triangles = triangulation.triangles
npts = np.shape(x)[0]
ntri = np.shape(triangles)[0]
if ancestors is not None:
ancestors = np.asarray(ancestors)
if np.shape(ancestors) != (ntri,):
raise ValueError(
"Incompatible shapes provide for triangulation"
".masked_triangles and ancestors: {0} and {1}".format(
np.shape(triangles), np.shape(ancestors)))
# Initiating tables refi_x and refi_y of the refined triangulation
# points
        # hint: each new midside point is shared by 2 triangles, except on border edges.
borders = np.sum(neighbors == -1)
        added_pts = (3*ntri + borders) // 2
refi_npts = npts + added_pts
refi_x = np.zeros(refi_npts)
refi_y = np.zeros(refi_npts)
# First part of refi_x, refi_y is just the initial points
refi_x[:npts] = x
refi_y[:npts] = y
# Second part contains the edge midside nodes.
        # Each edge belongs to 1 triangle (if border edge) or is shared by 2
        # triangles (interior edge).
        # We first build 2 arrays of size 3*ntri holding the edge starting
        # nodes (edge_elems, edge_apexes); we then keep only the masters to
        # avoid overlaps.
        # The so-called 'master' is the triangle with the larger index,
        # the 'slave' is the triangle with the lower index
# (can be -1 if border edge)
# For slave and master we will identify the apex pointing to the edge
# start
edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32)]))
edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32)*2]))
edge_neighbors = neighbors[edge_elems, edge_apexes]
mask_masters = (edge_elems > edge_neighbors)
# Identifying the "masters" and adding to refi_x, refi_y vec
masters = edge_elems[mask_masters]
apex_masters = edge_apexes[mask_masters]
x_add = (x[triangles[masters, apex_masters]] +
x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
y_add = (y[triangles[masters, apex_masters]] +
y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
refi_x[npts:] = x_add
refi_y[npts:] = y_add
        # Building the new triangles; each old triangle hosts
        # 4 new triangles.
# there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
# 3 new_pt_midside
new_pt_corner = triangles
# What is the index in refi_x, refi_y of point at middle of apex iapex
# of elem ielem ?
# If ielem is the apex master: simple count, given the way refi_x was
# built.
        # If ielem is the apex slave: we do not know yet, but we will find out
        # soon using the neighbors table.
new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
cum_sum = npts
for imid in range(3):
mask_st_loc = (imid == apex_masters)
n_masters_loc = np.sum(mask_st_loc)
elem_masters_loc = masters[mask_st_loc]
new_pt_midside[:, imid][elem_masters_loc] = np.arange(
n_masters_loc, dtype=np.int32) + cum_sum
cum_sum += n_masters_loc
# Now dealing with slave elems.
        # for each slave element we identify the master and then the inode;
        # once slave_masters is identified, slave_masters_apex is such that:
# neighbors[slaves_masters, slave_masters_apex] == slaves
mask_slaves = np.logical_not(mask_masters)
slaves = edge_elems[mask_slaves]
slaves_masters = edge_neighbors[mask_slaves]
diff_table = np.abs(neighbors[slaves_masters, :] -
np.outer(slaves, np.ones(3, dtype=np.int32)))
slave_masters_apex = np.argmin(diff_table, axis=1)
slaves_apex = edge_apexes[mask_slaves]
new_pt_midside[slaves, slaves_apex] = new_pt_midside[
slaves_masters, slave_masters_apex]
        # Builds the 4 child triangles
child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
child_triangles[0::4, :] = np.vstack([
new_pt_corner[:, 0], new_pt_midside[:, 0],
new_pt_midside[:, 2]]).T
child_triangles[1::4, :] = np.vstack([
new_pt_corner[:, 1], new_pt_midside[:, 1],
new_pt_midside[:, 0]]).T
child_triangles[2::4, :] = np.vstack([
new_pt_corner[:, 2], new_pt_midside[:, 2],
new_pt_midside[:, 1]]).T
child_triangles[3::4, :] = np.vstack([
new_pt_midside[:, 0], new_pt_midside[:, 1],
new_pt_midside[:, 2]]).T
child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
# Builds the child mask
if triangulation.mask is not None:
child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
if ancestors is None:
return child_triangulation
else:
return child_triangulation, np.repeat(ancestors, 4)
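# Illustrative usage sketch of UniformTriRefiner (a minimal example; the grid,
# the field z and the subdiv level are arbitrary choices for demonstration).
# The default interpolator used by refine_field is a CubicTriInterpolator, as
# in the method above.
def _example_uniform_refinement():
    xx, yy = np.meshgrid(np.linspace(0., 1., 4), np.linspace(0., 1., 4))
    tri = Triangulation(xx.ravel(), yy.ravel())  # Delaunay triangulation
    refiner = UniformTriRefiner(tri)
    # Each subdivision level splits every triangle into 4 children.
    refi_tri, found_index = refiner.refine_triangulation(subdiv=2,
                                                         return_tri_index=True)
    # Refine a field defined at the initial nodes.
    z = xx.ravel() * yy.ravel()
    refi_tri_z, refi_z = refiner.refine_field(z, subdiv=2)
    return refi_tri, found_index, refi_tri_z, refi_z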
| gpl-3.0 |
strawlab/pyopy | pyopy/hctsa/hctsa_benchmark.py | 1 | 10860 | # coding=utf-8
"""Benchmarks and checks the HCTSA python bindings."""
from itertools import product, izip
import os.path as op
import random
import time
from glob import glob
from datetime import datetime
from socket import gethostname
from lockfile import LockFile
import pandas as pd
import numpy as np
from pyopy.base import PyopyEngines, EngineException
from pyopy.config import PYOPY_EXTERNAL_TOOLBOXES_DIR
from pyopy.hctsa.hctsa_bindings import HCTSAOperations
from pyopy.hctsa.hctsa_catalog import HCTSACatalog
from pyopy.hctsa.hctsa_data import hctsa_sine, hctsa_noise, hctsa_noisysinusoid
from pyopy.hctsa.hctsa_install import hctsa_prepare_engine
from pyopy.hctsa.hctsa_transformers import hctsa_prepare_input
from pyopy.misc import ensure_dir
# Where benchmark and check results will live
HCTSA_BENCHMARKS_DIR = op.join(PYOPY_EXTERNAL_TOOLBOXES_DIR, 'hctsa_benchmarks')
# Operations that can potentially hang the system - these should eventually be properly fixed
HCTSA_FORBIDDEN_OPERATIONS = {
'Oct2PyEngine': (('HCTSA_MF_ARMA_orders', 'Enters Oct2Py interact mode'),
('HCTSA_SY_DriftingMean', 'Has a bug (l is not defined) and enters Oct2Py interact mode'),
('HCTSA_TSTL_predict', 'Takes too long?',)),
}
# Time series
TS_FACTORIES = {
'sine': hctsa_sine,
'noise': hctsa_noise,
'noisysinusoid': hctsa_noisysinusoid
}
def check_benchmark_bindings(x,
xname,
engine='matlab',
n_jobs=None,
transferinfo='ramdisk',
extra=None,
operations=None,
forbidden=None,
dest_file=None,
random_order=True):
# Setup the engine
engine = PyopyEngines.engine_or_matlab_or_octave(engine)
hctsa_prepare_engine(engine)
# Setup the input for hctsa
size = len(x)
y = engine.put('y', hctsa_prepare_input(x, z_scored=True))
x = engine.put('x', hctsa_prepare_input(x, z_scored=False))
# Operations
if operations is None:
operations = HCTSAOperations.all()
# Some operations that make the entire experiment fail
if forbidden is None:
forbidden = dict(HCTSA_FORBIDDEN_OPERATIONS.get(engine.__class__.__name__, ()))
# The host
hostname = gethostname()
# Number of threads used in the engine
max_comp_threads = engine.max_comp_threads()
# tidy results
results = {
'host': [],
'engine': [],
'transplanter': [],
'transferinfo': [],
'date': [],
'extra': [],
'n_jobs': [],
'n_threads_matlab': [],
'xname': [],
'size': [],
'taken_s': [],
'operator': [],
'operation': [],
'output': [],
'value': [],
'error': []
}
#
    # We want to check what is deterministic and what is not.
    # Newer versions of matlab are "deterministic" (they always init the rng equally),
    # so we will always get the same result if we use them in the same order...
    # Let's just randomise the order of operations, using a clock-seeded rng...
#
if random_order:
random.Random().shuffle(operations)
for opname, operation in operations:
print opname
start = time.time()
try:
if opname in forbidden:
raise EngineException(None, 'Forbidden operation')
result = operation.transform(y if HCTSACatalog.catalog().must_standardize(opname) else x, engine)
taken = time.time() - start
if not isinstance(result, dict):
result = {None: result}
for outname, fval in sorted(result.items()):
results['host'].append(hostname)
results['engine'].append(engine.__class__.__name__) # use whatami
results['transplanter'].append(engine.transplanter.__class__.__name__)
results['transferinfo'].append(transferinfo)
results['date'].append(datetime.now())
results['extra'].append(extra)
results['n_jobs'].append(n_jobs)
results['n_threads_matlab'].append(max_comp_threads)
results['xname'].append(xname)
results['size'].append(size)
results['taken_s'].append(taken)
results['operator'].append(operation.__class__.__name__)
results['operation'].append(opname)
results['output'].append(outname)
results['value'].append(fval)
results['error'].append(None)
except EngineException as engex:
taken = time.time() - start
results['host'].append(hostname)
results['engine'].append(engine.__class__.__name__) # use whatami
results['transplanter'].append(engine.transplanter.__class__.__name__)
results['transferinfo'].append(transferinfo)
results['date'].append(datetime.now())
results['extra'].append(extra)
results['n_jobs'].append(n_jobs)
results['n_threads_matlab'].append(max_comp_threads)
results['xname'].append(xname)
results['size'].append(size)
results['taken_s'].append(taken)
results['operator'].append(operation.__class__.__name__)
results['operation'].append(opname)
results['output'].append(np.nan)
results['value'].append(None)
results['error'].append(str(engex))
# Save the dataframe
df = pd.DataFrame(data=results)
if dest_file is None:
dest_file = op.join(HCTSA_BENCHMARKS_DIR, 'hctsa_checks_%s.pickle' % hostname)
ensure_dir(op.dirname(dest_file))
with LockFile(dest_file): # lame inefficient incrementality
if op.isfile(dest_file):
df = pd.concat((pd.read_pickle(dest_file), df))
df.to_pickle(dest_file)
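# Minimal single-engine sketch of the benchmark entry point above (assumes a
# working MATLAB installation reachable by pyopy; 'example-run' is just an
# arbitrary tag stored in the "extra" column of the results pickle):
def _example_single_benchmark():
    x = hctsa_sine()
    check_benchmark_bindings(x=x,
                             xname='sine',
                             engine='matlab',
                             n_jobs=1,
                             extra='example-run')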
def analyse():
# Load all the results
df = pd.concat(map(pd.read_pickle, glob(op.join(HCTSA_BENCHMARKS_DIR, '*.pickle'))))
# Reorder columns
columns = [u'host',
u'date',
u'engine',
u'transplanter',
u'transferinfo',
u'n_jobs',
u'n_threads_matlab',
u'extra',
u'xname',
u'operation',
u'operator',
u'output',
u'value',
u'size',
u'taken_s',
u'error']
df = df[columns]
# Make some stuff categorical
categoricals = ('host', 'engine', 'transplanter', 'transferinfo', 'extra', 'xname', 'operator', 'operation')
for categorical in categoricals:
df[categorical] = df[categorical].astype('category') # there must be something in pandas to do this at once
# One value was an empty list on one run, tisean routine, check (maybe concurrency?)
def is_float(val):
try:
float(val)
return True
except:
return False
float_values = df['value'].apply(is_float)
print '%d values were non-floats' % (~float_values).sum()
print df[~float_values]['operation']
df = df[float_values]
df = df.convert_objects(convert_numeric=True) # After removing these, value can be again converted to float
nodup = df.dropna(subset=['error'], axis=0).drop_duplicates(['operator', 'error']).sort('operation')
nodup.to_html(op.expanduser('~/dupes.html'))
print '\n'.join(nodup['operator'].unique())
operations_failing = map(lambda o: 'HCTSAOperations.%s[2],' % o, sorted(nodup['operation'].unique()))
print '\n'.join(operations_failing)
for opname, error in izip(nodup['operation'], nodup['error']):
print '-' * 80
print opname
print error
# Round to the 6th decimal
df['value'] = np.around(df['value'], decimals=6)
# Impact of running from pycharm
# Infinities, but not explicit errors (N.B. pandas isnull does not take into account infinities)
infinite = ~np.isfinite(df['value']) & ~np.isnan(df['value'])
# Errors (but not infinities)
nans = np.isnan(df['value'])
# Failed
failed = infinite | nans
# Features that are stochastic
catalog = HCTSACatalog.catalog()
def stochastic_failing(df, verbose=False, tooverbose=False):
for (xname, operation, output), oodf in df.groupby(['xname', 'operation', 'output']):
operator = oodf['operator'].iloc[0]
tagged_as_stochastic = catalog.operation(operation).has_tag('stochastic')
failing = 'OK' if (~np.isfinite(oodf['value'])).sum() == 0 else 'FAILING'
if oodf['value'].nunique() == 0:
print xname, operator, operation, output, failing, failing, failing, tagged_as_stochastic
elif oodf['value'].nunique() == 1:
if failing != 'OK' or verbose:
print xname, operator, operation, output, 'DETERMINISTIC', failing, tagged_as_stochastic
else:
print xname, operator, operation, output, 'RANDOMISED', failing, tagged_as_stochastic
if tooverbose:
for value, voodf in oodf.groupby('value'):
print '\t', value, map(str, voodf['host'].unique())
stochastic_failing(df)
FAILING_AFTER_CELL_NASTINESS = {
'Fails with sine': [HCTSAOperations.MF_GP_hyperparameters_covSEiso_covNoise_1_200_first[2]],
'Fails with noisysinusoid': [
HCTSAOperations.NL_TISEAN_d2_1_10_0[2],
HCTSAOperations.NL_TISEAN_d2_ac_10_001[2],
]
}
def test_one(operation, engine='matlab'):
engine = PyopyEngines.engine_or_matlab_or_octave(engine)
hctsa_prepare_engine(engine)
print operation.what().id()
for name, x in (('noise', hctsa_noise()),
('sine', hctsa_sine()),
('noisysinusoid', hctsa_noisysinusoid()),
('randn10000', np.random.RandomState(0).randn(10000))):
print '-' * 80
print name
x = engine.put('x', hctsa_prepare_input(x, z_scored=True))
try:
operation.transform(x, eng=engine)
except Exception as ex:
print ex
if __name__ == '__main__':
from joblib import Parallel, delayed
n_jobs = 4
Parallel(n_jobs=n_jobs)(delayed(check_benchmark_bindings)(x=xfact(),
xname=xname,
extra='pycharm',
n_jobs=n_jobs)
for (xname, xfact), _ in product(TS_FACTORIES.items(), range(4)))
| bsd-3-clause |
rknLA/sms-tools | lectures/09-Sound-description/plots-code/mfcc.py | 25 | 1103 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
mfcc_bands, mfcc_coeffs = mfcc(mX)
mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
| agpl-3.0 |
kubeflow/kfserving | docs/samples/explanation/alibi/imagenet/test_imagenet.py | 1 | 2521 | import argparse
import matplotlib.pyplot as plt
from tensorflow.keras.applications.inception_v3 import preprocess_input, decode_predictions
import numpy as np
import requests
import json
import os
from PIL import Image
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
PREDICT_TEMPLATE = 'http://{0}/v1/models/imagenet:predict'
EXPLAIN_TEMPLATE = 'http://{0}/v1/models/imagenet:explain'
def get_image_data():
data = []
image_shape = (299, 299, 3)
target_size = image_shape[:2]
image = Image.open("./cat-prediction.png").convert('RGB')
image = np.expand_dims(image.resize(target_size), axis=0)
data.append(image)
data = np.concatenate(data, axis=0)
return data
def predict(cluster_ip):
data = get_image_data()
images = preprocess_input(data)
payload = {
"instances": [images[0].tolist()]
}
# sending post request to TensorFlow Serving server
headers = {'Host': 'imagenet.default.example.com'}
url = PREDICT_TEMPLATE.format(cluster_ip)
print("Calling ", url)
r = requests.post(url, json=payload, headers=headers)
resp_json = json.loads(r.content.decode('utf-8'))
preds = np.array(resp_json["predictions"])
label = decode_predictions(preds, top=1)
plt.imshow(data[0])
plt.title(label[0])
plt.show()
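# Sketch of the request body both endpoints above expect (the KFServing V1
# "instances" envelope built in predict()/explain()); the zero tensor is only
# a placeholder for a preprocessed 299x299x3 image.
def _example_payload():
    dummy_image = np.zeros((299, 299, 3)).tolist()
    return {"instances": [dummy_image]}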
def explain(cluster_ip):
data = get_image_data()
images = preprocess_input(data)
payload = {
"instances": [images[0].tolist()]
}
    # sending post request to the KFServing explain endpoint
headers = {'Host': 'imagenet.default.example.com'}
url = EXPLAIN_TEMPLATE.format(cluster_ip)
print("Calling ", url)
r = requests.post(url, json=payload, headers=headers)
if r.status_code == 200:
explanation = json.loads(r.content.decode('utf-8'))
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(data[0])
axarr[1].imshow(explanation['data']['anchor'])
plt.show()
else:
print("Received response code and content", r.status_code, r.content)
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_ip', default=os.environ.get("CLUSTER_IP"), help='Cluster IP of Istio Ingress Gateway')
parser.add_argument('--op', choices=["predict", "explain"], default="predict",
help='Operation to run')
args, _ = parser.parse_known_args()
if __name__ == "__main__":
if args.op == "predict":
predict(args.cluster_ip)
elif args.op == "explain":
explain(args.cluster_ip)
| apache-2.0 |
pianomania/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 13 | 7616 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.model_selection import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
"""Check Isolation Forest for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid({"n_estimators": [3],
"max_samples": [0.5, 1.0, 3],
"bootstrap": [True, False]})
with ignore_warnings():
for params in grid:
IsolationForest(random_state=rng,
**params).fit(X_train).predict(X_test)
def test_iforest_sparse():
"""Check IForest for various parameter settings on sparse input."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
"""Test that it gives proper exception on deficient input."""
X = iris.data
# Test max_samples
assert_raises(ValueError,
IsolationForest(max_samples=-1).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=0.0).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=2.0).fit, X)
# The dataset has less than 256 samples, explicitly setting
# max_samples > n_samples should result in a warning. If not set
# explicitly there should be no warning
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
IsolationForest(max_samples=1000).fit, X)
assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)
def test_recalculate_max_depth():
"""Check max_depth recalculation when max_samples is reset to n_samples"""
X = iris.data
clf = IsolationForest().fit(X)
for est in clf.estimators_:
assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))
def test_max_samples_attribute():
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=500)
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
clf.fit, X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=0.4).fit(X)
assert_equal(clf.max_samples_, 0.4*X.shape[0])
def test_iforest_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = IsolationForest(n_jobs=3,
random_state=0).fit(X_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = IsolationForest(n_jobs=1,
random_state=0).fit(X_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_iforest_performance():
"""Test Isolation Forest performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = - clf.decision_function(X_test)
# check that there is at most 6 errors (false positive or false negative)
assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
    # Test IsolationForest
clf = IsolationForest(random_state=rng, contamination=0.25)
clf.fit(X)
decision_func = - clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_max_samples_consistency():
# Make sure validated max_samples in iforest and BaseBagging are identical
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, clf._max_samples)
def test_iforest_subsampled_features():
    # Non-regression test for #5732, which failed at predict.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
clf = IsolationForest(max_features=0.8)
clf.fit(X_train, y_train)
clf.predict(X_test)
| bsd-3-clause |
wonderui/Hoop_Fantasy | nba_seer-0.1/nba_seer.py | 1 | 13329 | # import modules ----------------------
import nba_py
import nba_py.game
import nba_py.player
import nba_py.team
import pandas as pd
import numpy as np
import datetime
import pytz
old_settings = np.seterr(all='print')
np.geterr()
print('modules imported')
# define functions ----------------------
def get_games(date):
"""
:param date: datetime.date, the match day
:return: df, all the games on the given day
"""
return nba_py.Scoreboard(month=date.month,
day=date.day,
year=date.year,
league_id='00',
offset=0).game_header()[['GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]
def get_players(games, all_players):
"""
:param games: df, some games
:param all_players: df, all players list of this season
:return: df, all players of the given games
"""
home_team_player = all_players[all_players['TEAM_ID'].isin(games['HOME_TEAM_ID'])][['PERSON_ID', 'TEAM_ID']]
home_team_player['Location'] = 'HOME'
away_team_player = all_players[all_players['TEAM_ID'].isin(games['VISITOR_TEAM_ID'])][['PERSON_ID', 'TEAM_ID']]
away_team_player['Location'] = 'AWAY'
players = pd.concat([home_team_player, away_team_player])
game_team = pd.concat([games[['HOME_TEAM_ID', 'GAME_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID'}),
games[['VISITOR_TEAM_ID', 'GAME_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID'})])
players = pd.merge(players, game_team, on='TEAM_ID')
team_team = pd.concat(
[games[['HOME_TEAM_ID', 'VISITOR_TEAM_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID',
'VISITOR_TEAM_ID': 'Against_Team_ID'}),
games[['VISITOR_TEAM_ID', 'HOME_TEAM_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID',
'HOME_TEAM_ID': 'Against_Team_ID'})])
players = pd.merge(players, team_team, on='TEAM_ID')
players = pd.merge(players, all_players[['PERSON_ID', 'DISPLAY_FIRST_LAST', 'TEAM_ABBREVIATION']], on='PERSON_ID')
return players
def get_players_p(games, game_stats_logs):
"""
:param games: df, some games
:param game_stats_logs: df, all previous game stats logs imported from sql
:return: df, all players of the given games at the match date
"""
players = pd.DataFrame()
for i in games.index:
players = players.append(game_stats_logs[(game_stats_logs['GAME_ID'] == games.iloc[i]['GAME_ID']) &
(game_stats_logs['TEAM_ID'] == games.iloc[i]['HOME_TEAM_ID'])])
players = players.append(game_stats_logs[(game_stats_logs['GAME_ID'] == games.iloc[i]['GAME_ID']) &
(game_stats_logs['TEAM_ID'] == games.iloc[i]['VISITOR_TEAM_ID'])])
players['Location'] = players.apply(lambda x: 'HOME' if x['TEAM_ID'] ==
int(games[games['GAME_ID'] == x['GAME_ID']]['HOME_TEAM_ID'])
else 'AWAY', axis=1)
team_team = pd.concat(
[games[['HOME_TEAM_ID', 'VISITOR_TEAM_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID',
'VISITOR_TEAM_ID': 'Against_Team_ID'}),
games[['VISITOR_TEAM_ID', 'HOME_TEAM_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID',
'HOME_TEAM_ID': 'Against_Team_ID'})])
return pd.merge(players, team_team,
on='TEAM_ID')[['PLAYER_ID', 'TEAM_ID', 'Location', 'GAME_ID',
'Against_Team_ID']].rename(columns={'PLAYER_ID': 'PERSON_ID'})
def get_last_n_game_logs(game_stats_logs, player_id, game_id, n):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param player_id: int, player id
:param game_id: str, game id
:param n: int, size of games
:return: df, the n game log of the player before the given game
"""
player_game_logs = game_stats_logs[game_stats_logs['PLAYER_ID'] == player_id]
last_n_game = player_game_logs[player_game_logs['GAME_ID_O'] < game_id].sort_values('GAME_ID_O').tail(n)
return last_n_game[['MINS', 'PTS', 'AST', 'OREB', 'DREB', 'STL', 'BLK', 'TO', 'FGM', 'FGA', 'FG3M']]
def get_score_36(game_logs):
"""
:param game_logs: df, game logs
    :return: tuple, [0] the average fantasy score (float, per 36 mins) of the given game log, [1] the score cov
    (float) of the given game log
"""
convert_to_36 = lambda x: x[['PTS', 'AST', 'OREB', 'DREB', 'STL', 'BLK',
'TO', 'FGM', 'FGA', 'FG3M']] * 36 / x['MINS']
stats = game_logs.apply(convert_to_36, axis=1)
stats['SCO'] = stats['PTS'] * 1 + stats['AST'] * 1.5 + stats['OREB'] * 1.2 + stats['DREB'] * 1.2 + \
stats['STL'] * 2 + stats['BLK'] * 2 + stats['TO'] * -1
stats['EFF'] = stats['SCO'] / 36
stats = stats[abs(stats['EFF']) <= 2.5]
return stats['SCO'].mean(), stats['SCO'].std() / stats['SCO'].mean()
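# Worked example of the per-36 scoring above (the stat line is made up): with
# 20 PTS, 5 AST, 2 OREB, 6 DREB, 1 STL, 1 BLK, 3 TO in 30 MINS, the weighted
# sum is 38.1, which scales by 36/30 to 45.72 fantasy points per 36 minutes;
# with a single game the cov term comes back as NaN.
def _example_score_36():
    game_log = pd.DataFrame([{'MINS': 30, 'PTS': 20, 'AST': 5, 'OREB': 2,
                              'DREB': 6, 'STL': 1, 'BLK': 1, 'TO': 3,
                              'FGM': 8, 'FGA': 15, 'FG3M': 1}])
    return get_score_36(game_log)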
def get_ma(game_stats_logs, row, n):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
:param n: int, size of ma
:return: float, average fantasy score of the player in n games before the given game
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
ma_n = get_score_36(get_last_n_game_logs(game_stats_logs, player_id, game_id_o, n))[0]
return round(float(ma_n), 2)
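# The MA/MIN/COV helpers above all re-order GAME_ID into the GAME_ID_O form
# stored in the stats logs before comparing. A sketch of that re-ordering,
# assuming NBA-style ten-character ids such as '0021600001' (both the sample
# id and the "season digits first so ids sort chronologically" reading are
# assumptions, not taken from the original code):
def _example_game_id_reorder(game_id='0021600001'):
    return game_id[3:5] + game_id[:3] + game_id[-5:]  # -> '1600200001'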
def get_min(game_stats_logs, row, n):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
:param n: int, size of ma
:return: float, average mins the player played in n games before the given game
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
min_n = get_last_n_game_logs(game_stats_logs, player_id, game_id_o, n)['MINS'].mean()
return round(float(min_n), 2)
def get_min_cov(game_stats_logs, row, n):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
:param n: int, size of ma
:return: float, cov of mins the player played in n games before the given game
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
min_cov_n = get_last_n_game_logs(game_stats_logs,
player_id,
game_id_o,
n)['MINS'].std() / get_last_n_game_logs(game_stats_logs,
player_id,
game_id_o,
n)['MINS'].mean()
return round(float(min_cov_n), 3)
def get_sco_cov(game_stats_logs, row, n):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
:param n: int, size of ma
:return: float, cov of scores the player get in n games before the given game
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
sco_cov_n = get_score_36(get_last_n_game_logs(game_stats_logs, player_id, game_id_o, n))[1]
return round(float(sco_cov_n), 3)
def last_n_games_days(game_stats_logs, row, n):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
:param n: int, size of the spread of games
    :return: int, the number of days spanned by the last n games
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
player_stats_logs = game_stats_logs[game_stats_logs['PLAYER_ID'] == player_id]
ordered_logs = player_stats_logs.sort_values('GAME_ID_O')
player_5g = ordered_logs[(ordered_logs['GAME_ID_O'] < game_id_o) &
(ordered_logs['MINS'].notnull())].tail(n)
if len(player_5g) != 0:
min_d = datetime.datetime.strptime(player_5g['GAME_DATE_EST'].min()[:10], '%Y-%m-%d').date()
max_d = datetime.datetime.strptime(player_5g['GAME_DATE_EST'].max()[:10], '%Y-%m-%d').date()
return (max_d - min_d).days + 1
else:
return None
def days_rest(game_stats_logs, row):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
    :return: int, the number of days the player has rested before this game
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
player_stats_logs = game_stats_logs[game_stats_logs['PLAYER_ID'] == player_id]
ordered_logs = player_stats_logs.sort_values('GAME_ID_O')
last_game = ordered_logs[(ordered_logs['GAME_ID_O'] < game_id_o) &
(ordered_logs['MINS'].notnull())].tail(1)
if len(last_game) != 0:
last_g_d = datetime.datetime.strptime(last_game['GAME_DATE_EST'].max()[:10], '%Y-%m-%d').date()
ustz = pytz.timezone('America/New_York')
us_time = datetime.datetime.now(ustz)
today = us_time.date()
return (today - last_g_d).days - 1
else:
return None
def location_aff(game_stats_logs, row):
"""
:param game_stats_logs: df, all previous game stats logs imported from sql
:param row: pd.series, player id and game id
    :return: tuple, [0] home game effect, [1] away game effect
"""
player_id = row['PERSON_ID']
game_id_o = row['GAME_ID'][3:5] + row['GAME_ID'][:3] + row['GAME_ID'][-5:]
player_stats_logs = game_stats_logs[game_stats_logs['PLAYER_ID'] == player_id].sort_values('GAME_ID_O')
player_stats_home = player_stats_logs[(player_stats_logs['LOCATION'] == 'HOME') &
(player_stats_logs['MINS'].notnull()) &
(player_stats_logs['GAME_ID_O'] < game_id_o)].tail(20)
home_score_20 = get_score_36(player_stats_home)[0]
player_stats_away = player_stats_logs[(player_stats_logs['LOCATION'] == 'AWAY') &
(player_stats_logs['MINS'].notnull()) &
(player_stats_logs['GAME_ID_O'] < game_id_o)].tail(20)
away_score_20 = get_score_36(player_stats_away)[0]
player_stats_all = player_stats_logs[(player_stats_logs['MINS'].notnull()) &
(player_stats_logs['GAME_ID_O'] < game_id_o)].tail(40)
recent_score_40 = get_score_36(player_stats_all)[0]
return home_score_20 / recent_score_40, away_score_20 / recent_score_40
def get_exp_sco(players, game_stats_logs):
"""
:param players: df, players list
:param game_stats_logs: df, all previous game stats logs imported from sql
:return: df, all players with their expect fantasy score
"""
players['5_g_d'] = players.apply(lambda x: last_n_games_days(game_stats_logs, x, 5), axis=1)
print('5games days complete!')
players['d_rest'] = players.apply(lambda x: days_rest(game_stats_logs, x), axis=1)
print('days rest complete!')
players['MA_20'] = players.apply(lambda x: get_ma(game_stats_logs, x, 20), axis=1)
print('ma20 complete!')
players['MA_10'] = players.apply(lambda x: get_ma(game_stats_logs, x, 10), axis=1)
print('ma10 complete!')
players['MA_5'] = players.apply(lambda x: get_ma(game_stats_logs, x, 5), axis=1)
print('ma5 complete!')
players['MIN_20'] = players.apply(lambda x: get_min(game_stats_logs, x, 20), axis=1)
print('min20 complete!')
players['MIN_10'] = players.apply(lambda x: get_min(game_stats_logs, x, 10), axis=1)
print('min10 complete!')
players['MIN_5'] = players.apply(lambda x: get_min(game_stats_logs, x, 5), axis=1)
print('min5 complete!')
players['MIN_COV_20'] = players.apply(lambda x: get_min_cov(game_stats_logs, x, 20), axis=1)
print('min_cov_20 complete!')
players['SCO_COV_20'] = players.apply(lambda x: get_sco_cov(game_stats_logs, x, 20), axis=1)
print('sco_cov_20 complete!')
players = players[players['SCO_COV_20'] > 0].copy()
    print('sco cov less than 0 dropped!')
players['home_aff'] = players.apply(lambda x: location_aff(game_stats_logs, x)[0], axis=1)
players['away_aff'] = players.apply(lambda x: location_aff(game_stats_logs, x)[1], axis=1)
print('location affect complete!')
players['EXP_SCO'] = round(players[['MA_20', 'MA_10', 'MA_5']].mean(axis=1) *
players[['MIN_20', 'MIN_10', 'MIN_5']].mean(axis=1) / 36, 2)
players['EXP_SCO_L'] = players.apply(lambda x: x['EXP_SCO'] * x['home_aff'] if x['Location'] == 'HOME'
else x['EXP_SCO'] * x['away_aff'], axis=1)
print('all done!')
return players
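# Worked example of the final expectation computed above (all numbers are made
# up): with per-36 moving averages of 40, 42 and 44 and average minutes of 30,
# 32 and 34, EXP_SCO = mean(40, 42, 44) * mean(30, 32, 34) / 36 = 37.33, and a
# home game with home_aff = 1.05 gives EXP_SCO_L = 39.20.
def _example_expected_score(ma=(40.0, 42.0, 44.0), mins=(30.0, 32.0, 34.0),
                            home_aff=1.05):
    exp_sco = np.mean(ma) * np.mean(mins) / 36
    return round(exp_sco, 2), round(exp_sco * home_aff, 2)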
print('functions defined')
| mit |
gallantlab/pycortex | cortex/dataset/views.py | 1 | 15278 | import json
import h5py
import numpy as np
from six import string_types
from .. import options
from .braindata import BrainData, VolumeData, VertexData
default_cmap = options.config.get("basic", "default_cmap")
def normalize(data):
if isinstance(data, tuple):
if len(data) == 3:
if data[0].dtype == np.uint8:
return VolumeRGB(data[0][...,0], data[0][...,1], data[0][...,2], *data[1:])
return Volume(*data)
elif len(data) == 2:
return Vertex(*data)
else:
raise TypeError("Invalid input for Dataview")
elif isinstance(data, Dataview):
return data
else:
raise TypeError("Invalid input for Dataview")
def _from_hdf_data(h5, name, xfmname=None, **kwargs):
"""Decodes a __hash named node from an HDF file into the
constituent Vertex or Volume object"""
dnode = h5.get("/data/%s"%name)
if dnode is None:
dnode = h5.get(name)
attrs = {k: u(v) for (k, v) in dnode.attrs.items()}
subj = attrs['subject']
#support old style xfmname saving as attribute
if xfmname is None and 'xfmname' in attrs:
xfmname = attrs['xfmname']
mask = None
if 'mask' in attrs:
if attrs['mask'].startswith("__"):
mask = h5['/subjects/%s/transforms/%s/masks/%s'%(attrs['subject'], xfmname, attrs['mask'])].value
else:
mask = attrs['mask']
#support old style RGB volumes
if dnode.dtype == np.uint8 and dnode.shape[-1] in (3, 4):
alpha = None
if dnode.shape[-1] == 4:
alpha = dnode[..., 3]
if xfmname is None:
return VertexRGB(dnode[...,0], dnode[...,1], dnode[...,2], subj,
alpha=alpha, **kwargs)
return VolumeRGB(dnode[...,0], dnode[...,1], dnode[...,2], subj, xfmname,
alpha=alpha, mask=mask, **kwargs)
if xfmname is None:
return Vertex(dnode, subj, **kwargs)
return Volume(dnode, subj, xfmname, mask=mask, **kwargs)
def _from_hdf_view(h5, data, xfmname=None, vmin=None, vmax=None, **kwargs):
if isinstance(data, string_types):
return _from_hdf_data(h5, data, xfmname=xfmname, vmin=vmin, vmax=vmax, **kwargs)
if len(data) == 2:
dim1 = _from_hdf_data(h5, data[0], xfmname=xfmname[0])
dim2 = _from_hdf_data(h5, data[1], xfmname=xfmname[1])
cls = Vertex2D if isinstance(dim1, Vertex) else Volume2D
return cls(dim1, dim2, vmin=vmin[0], vmin2=vmin[1],
vmax=vmax[0], vmax2=vmax[1], **kwargs)
elif len(data) == 4:
red, green, blue = [_from_hdf_data(h5, d, xfmname=xfmname) for d in data[:3]]
alpha = None
if data[3] is not None:
alpha = _from_hdf_data(h5, data[3], xfmname=xfmname)
cls = VertexRGB if isinstance(red, Vertex) else VolumeRGB
return cls(red, green, blue, alpha=alpha, **kwargs)
else:
raise ValueError("Invalid Dataview specification")
class Dataview(object):
def __init__(self, cmap=None, vmin=None, vmax=None, description="", state=None, **kwargs):
if self.__class__ == Dataview:
raise TypeError('Cannot directly instantiate Dataview objects')
self.cmap = cmap if cmap is not None else default_cmap
self.vmin = vmin
self.vmax = vmax
self.state = state
self.attrs = kwargs
if 'priority' not in self.attrs:
self.attrs['priority'] = 1
self.description = description
def copy(self, *args, **kwargs):
kwargs.update(self.attrs)
return self.__class__(*args,
cmap=self.cmap,
vmin=self.vmin,
vmax=self.vmax,
description=self.description,
state=self.state,
**kwargs)
@property
def priority(self):
return self.attrs['priority']
@priority.setter
def priority(self, value):
self.attrs['priority'] = value
def to_json(self, simple=False):
if simple:
return dict()
desc = self.description
if hasattr(desc, 'decode'):
desc = desc.decode()
sdict = dict(
state=self.state,
attrs=self.attrs.copy(),
desc=desc)
try:
sdict.update(dict(
cmap=[self.cmap],
vmin=[self.vmin if self.vmin is not None else np.percentile(np.nan_to_num(self.data), 1)],
vmax=[self.vmax if self.vmax is not None else np.percentile(np.nan_to_num(self.data), 99)]
))
except AttributeError:
pass
return sdict
@staticmethod
def from_hdf(node):
data = json.loads(u(node[0]))
desc = node[1]
try:
cmap = json.loads(u(node[2]))
except:
cmap = u(node[2])
vmin = json.loads(u(node[3]))
vmax = json.loads(u(node[4]))
state = json.loads(u(node[5]))
attrs = json.loads(u(node[6]))
try:
xfmname = json.loads(u(node[7]))
except ValueError:
xfmname = None
if not isinstance(vmin, list):
vmin = [vmin]
if not isinstance(vmax, list):
vmax = [vmax]
if not isinstance(cmap, list):
cmap = [cmap]
if len(data) == 1:
xfm = None if xfmname is None else xfmname[0]
return _from_hdf_view(node.file, data[0], xfmname=xfm, cmap=cmap[0], description=desc,
vmin=vmin[0], vmax=vmax[0], state=state, **attrs)
else:
            views = [_from_hdf_view(node.file, d, xfmname=x) for d, x in zip(data, xfmname)]
raise NotImplementedError
def _write_hdf(self, h5, name="data", data=None, xfmname=None):
views = h5.require_group("/views")
view = views.require_dataset(name, (8,), h5py.special_dtype(vlen=str))
view[0] = json.dumps(data)
view[1] = self.description
try:
view[2] = json.dumps([self.cmap])
view[3] = json.dumps([self.vmin])
view[4] = json.dumps([self.vmax])
except AttributeError:
#For VolumeRGB/Vertex, there is no cmap/vmin/vmax
view[2] = "null"
view[3:5] = "null"
view[5] = json.dumps(self.state)
view[6] = json.dumps(self.attrs)
view[7] = json.dumps(xfmname)
return view
@property
def raw(self):
from matplotlib import colors, cm, pyplot as plt
import glob, os
# Get colormap from matplotlib or pycortex colormaps
## -- redundant code, here and in cortex/quicklflat.py -- ##
if isinstance(self.cmap, string_types):
if not self.cmap in cm.__dict__:
# unknown colormap, test whether it's in pycortex colormaps
cmapdir = options.config.get('webgl', 'colormaps')
colormaps = glob.glob(os.path.join(cmapdir, "*.png"))
colormaps = dict(((os.path.split(c)[1][:-4],c) for c in colormaps))
if not self.cmap in colormaps:
raise Exception('Unkown color map!')
I = plt.imread(colormaps[self.cmap])
cmap = colors.ListedColormap(np.squeeze(I))
# Register colormap while we're at it
cm.register_cmap(self.cmap,cmap)
else:
cmap = cm.get_cmap(self.cmap)
elif isinstance(self.cmap, colors.Colormap):
cmap = self.cmap
# Normalize colors according to vmin, vmax
norm = colors.Normalize(self.vmin, self.vmax)
cmapper = cm.ScalarMappable(norm=norm, cmap=cmap)
color_data = cmapper.to_rgba(self.data.flatten()).reshape(self.data.shape+(4,))
# rollaxis puts the last color dimension first, to allow output of separate channels: r,g,b,a = dataset.raw
color_data = (np.clip(color_data, 0, 1) * 255).astype(np.uint8)
return np.rollaxis(color_data, -1)
class Multiview(Dataview):
def __init__(self, views, description=""):
for view in views:
if not isinstance(view, Dataview):
raise TypeError("Must be a View object!")
raise NotImplementedError
self.views = views
def uniques(self, collapse=False):
for view in self.views:
for sv in view.uniques(collapse=collapse):
yield sv
class Volume(VolumeData, Dataview):
"""
Encapsulates a 3D volume or 4D volumetric movie. Includes information on how
the volume should be colormapped for display purposes.
Parameters
----------
data : ndarray
The data. Can be 3D with shape (z,y,x), 1D with shape (v,) for masked data,
4D with shape (t,z,y,x), or 2D with shape (t,v). For masked data, if the
size of the given array matches any of the existing masks in the database,
that mask will automatically be loaded. If it does not, an error will be
raised.
subject : str
Subject identifier. Must exist in the pycortex database.
xfmname : str
Transform name. Must exist in the pycortex database.
mask : ndarray, optional
Binary 3D array with shape (z,y,x) showing which voxels are selected.
If masked data is given, the mask will automatically be loaded if it
exists in the pycortex database.
cmap : str or matplotlib colormap, optional
Colormap (or colormap name) to use. If not given defaults to matplotlib
default colormap.
vmin : float, optional
Minimum value in colormap. If not given, defaults to the 1st percentile
of the data.
vmax : float, optional
Maximum value in colormap. If not given defaults to the 99th percentile
of the data.
description : str, optional
String describing this dataset. Displayed in webgl viewer.
**kwargs
All additional arguments in kwargs are passed to the VolumeData and Dataview
"""
def __init__(self, data, subject, xfmname, mask=None,
cmap=None, vmin=None, vmax=None, description="", **kwargs):
super(Volume, self).__init__(data, subject, xfmname, mask=mask,
cmap=cmap, vmin=vmin, vmax=vmax,
description=description, **kwargs)
# set vmin and vmax
self.vmin = self.vmin if self.vmin is not None else \
np.percentile(np.nan_to_num(self.data), 1)
self.vmax = self.vmax if self.vmax is not None else \
np.percentile(np.nan_to_num(self.data), 99)
def _write_hdf(self, h5, name="data"):
datanode = VolumeData._write_hdf(self, h5)
viewnode = Dataview._write_hdf(self, h5, name=name,
data=[self.name],
xfmname=[self.xfmname])
return viewnode
@property
def raw(self):
r, g, b, a = super(Volume, self).raw
return VolumeRGB(r, g, b, self.subject, self.xfmname, a,
description=self.description, state=self.state,
**self.attrs)
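# Usage sketch for Volume; "S1" and "fullhead" are the pycortex example
# subject and transform, and the (31, 100, 100) shape is assumed to match that
# transform -- substitute entries from your own database as needed.
def _example_volume(subject="S1", xfmname="fullhead", shape=(31, 100, 100)):
    data = np.random.randn(*shape)
    return Volume(data, subject, xfmname, vmin=-2, vmax=2,
                  description="random demo volume")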
class Vertex(VertexData, Dataview):
"""
Encapsulates a 1D vertex map or 2D vertex movie. Includes information on how
the data should be colormapped for display purposes.
Parameters
----------
data : ndarray
The data. Can be 1D with shape (v,), or 2D with shape (t,v). Here, v can
be the number of vertices in both hemispheres, or the number of vertices
in either one of the hemispheres. In that case, the data for the other
hemisphere will be filled with zeros.
subject : str
Subject identifier. Must exist in the pycortex database.
cmap : str or matplotlib colormap, optional
Colormap (or colormap name) to use. If not given defaults to matplotlib
default colormap.
vmin : float, optional
Minimum value in colormap. If not given, defaults to the 1st percentile
of the data.
vmax : float, optional
Maximum value in colormap. If not given defaults to the 99th percentile
of the data.
description : str, optional
String describing this dataset. Displayed in webgl viewer.
**kwargs
        All additional arguments in kwargs are passed to the VertexData and Dataview constructors.
"""
def __init__(self, data, subject, cmap=None, vmin=None, vmax=None, description="", **kwargs):
super(Vertex, self).__init__(data, subject, cmap=cmap, vmin=vmin, vmax=vmax,
description=description, **kwargs)
# set vmin and vmax
self.vmin = self.vmin if self.vmin is not None else \
np.percentile(np.nan_to_num(self.data), 1)
self.vmax = self.vmax if self.vmax is not None else \
np.percentile(np.nan_to_num(self.data), 99)
def _write_hdf(self, h5, name="data"):
datanode = VertexData._write_hdf(self, h5)
viewnode = Dataview._write_hdf(self, h5, name=name, data=[self.name])
return viewnode
@property
def raw(self):
r, g, b, a = super(Vertex, self).raw
return VertexRGB(r, g, b, self.subject, a,
description=self.description, state=self.state,
**self.attrs)
def map(self, target_subj, surface_type='fiducial',
hemi='both', fs_subj=None, **kwargs):
"""Map this data from this surface to another surface
        Resamples this vertex data through surface-to-surface mapping
        matrices obtained from `cortex.db.get_mri_surf2surf_matrix`.
NOTE: Requires either previous computation of mapping matrices
(with `cortex.db.get_mri_surf2surf_matrix`) or active
freesurfer environment.
Parameters
----------
target_subj : str
freesurfer subject to which to map
Other Parameters
----------------
        kwargs are passed to `cortex.db.get_mri_surf2surf_matrix`
"""
# Input check
if hemi not in ['lh', 'rh', 'both']:
raise ValueError("`hemi` kwarg must be 'lh', 'rh', or 'both'")
# lazy load
from ..database import db
mats = db.get_mri_surf2surf_matrix(self.subject, surface_type,
hemi='both', target_subj=target_subj, fs_subj=fs_subj,
**kwargs)
new_data = [mats[0].dot(self.left), mats[1].dot(self.right)]
if hemi == 'both':
new_data = np.hstack(new_data)
elif hemi == 'lh':
new_data = np.hstack([new_data[0], np.nan * np.zeros(new_data[1].shape)])
elif hemi == 'rh':
new_data = np.hstack([np.nan * np.zeros(new_data[0].shape), new_data[1]])
vx = Vertex(new_data, target_subj, vmin=self.vmin, vmax=self.vmax, cmap=self.cmap)
return vx
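# Usage sketch for Vertex; the data length must equal the subject's total (or
# single-hemisphere) vertex count, which is why it is passed in here rather
# than guessed ("S1" is the pycortex example subject).
def _example_vertex(n_vertices, subject="S1"):
    data = np.random.randn(n_vertices)
    return Vertex(data, subject, vmin=-2, vmax=2)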
def u(s, encoding='utf8'):
try:
return s.decode(encoding)
except AttributeError:
return s
from .viewRGB import VolumeRGB, VertexRGB, Colors
from .view2D import Volume2D, Vertex2D
| bsd-2-clause |
RRShieldsCutler/clusterpluck | clusterpluck/scripts/mpi_collapse.py | 1 | 4106 | #!/usr/bin/env Python
import argparse
import sys
import numpy as np
import pandas as pd
import warnings
from clusterpluck.scripts.cluster_dictionary import build_cluster_map
from clusterpluck.scripts.orfs_in_common import generate_index_list
from clusterpluck.scripts.orfs_in_common import pick_a_cluster
from functools import partial
from scoop import futures
# usage = python -m scoop -vv -n 480 mpi_collapse -i [input.csv] -m [.mpfa key] -o [output.csv]
# The arg parser
def make_arg_parser():
parser = argparse.ArgumentParser(description='Collapse ORF matrix into a scored cluster matrix. Run with "python -m scoop -vv -n 480 mpi_parallel_collapse [args]"')
parser.add_argument('-i', '--input', help='Input is the ORF matrix CSV file.', default='-')
parser.add_argument('-m', '--mpfa', help='The multi-protein fasta file (.mpfa) from which to build the dictionary')
parser.add_argument('-b', '--bread', help='Where to find the cluster information in the header for the sequence (default="ref|,|")', default='ref|,|')
parser.add_argument('-o', '--output', help='Where to save the output csv; default to screen', required=False, default='-')
return parser
def generate_chunk_list(in_csv2):
header = pd.read_csv(in_csv2, header=0, engine='c', index_col=0, nrows=0)
header = list(header.columns)
print('Extracted headers from input file...\n')
return header
def parallel_clustermean(mx, c_list):
i = len(c_list)
mat = np.zeros((i, 1))
c_i = 0
for cluster2 in c_list:
mx_dubsub = mx.filter(like=cluster2, axis=0) # subsets the smaller matrix by rows belonging to one cluster
# finds the mean of the cells in the cluster x cluster2 matrix
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning) # np doesn't like taking mean of empty slices
cc_mean = np.nanmean(mx_dubsub.values, dtype='float64')
mat[c_i, 0] = cc_mean # saves this mean into the pre-existing array at the right location
c_i += 1
del mx
dfmeans = pd.DataFrame(mat)
dfmeans = dfmeans.round(decimals=2)
dfmeans.index = c_list
return dfmeans
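# Small worked example of parallel_clustermean (cluster and ORF names are made
# up): rows are ORFs labelled by their parent cluster, the column holds the
# scores against one target cluster, and the result is the nan-mean per row
# cluster (85.0 for clusterA, 40.0 for clusterB).
def _example_clustermean():
    mx = pd.DataFrame({'clusterX_orf1': [90.0, 80.0, np.nan, 40.0]},
                      index=['clusterA_orf1', 'clusterA_orf2',
                             'clusterB_orf1', 'clusterB_orf2'])
    return parallel_clustermean(mx, ['clusterA', 'clusterB'])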
def main():
parser = make_arg_parser()
args = parser.parse_args()
# Parse command line
with open(args.mpfa, 'r') as inf:
# Generates dictionary with each unique 'refseq_cluster' as keys, ORFs as values
cluster_map = build_cluster_map(inf, bread=args.bread)
with open(args.input, 'r') as in_csv:
print('\nOk, processing input file in pieces...\n')
inkey = generate_index_list(in_csv)
# print(len(inkey))
with open(args.input, 'r') as in_csv2:
headers = generate_chunk_list(in_csv2)
# print(len(headers))
c_list = list(cluster_map.keys())
# ct = len(c_list)
# print('Found %d clusters...' % ct)
data_to_pool = []
grabbed_clusters = []
for cluster in c_list:
grab = pick_a_cluster(headers, cluster) # uses the name of the cluster to get a list of all orfs for a particular unique cluster
if not grab:
pass
else:
# print(grab)
grabbed_clusters.extend([cluster])
with open(args.input, 'r') as inf3:
mx = pd.read_csv(inf3, sep=',', header=0, usecols=grab, engine='c') # loads in only the columns from the grab list, i.e. all cols for a unique cluster
mx.index = inkey # reindexes the df with the orf labels after importing specific columns with usecols
data_to_pool.append(mx) # create the list of dfs to map over for multiprocessing
if __name__ == '__main__':
print('\nSending data to Workers... work, Workers, work!')
results = list(futures.map(partial(parallel_clustermean, c_list=c_list), data_to_pool))
print('\nFile processing complete; writing output file...\n')
del data_to_pool
with open(args.output, 'w') if args.output != '-' else sys.stdout as outf:
outdf = pd.concat(results, axis=1)
outdf.columns = grabbed_clusters # names the columns (and index, next line) according to clusters in the order they were processed
# outdf.index = c_list
outdf.sort_index(axis=0, inplace=True) # ensure that the clusters are in order on cols and rows
outdf.sort_index(axis=1, inplace=True)
outdf.to_csv(outf)
if __name__ == '__main__':
main()
| mit |
alexeyum/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 4 | 26200 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Generate random data for X.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
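# A small illustrative sketch (not part of the original test suite): the
# {X,Y}_norm_squared keyword arguments exercised above rely on the identity
# ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * <x, y>, which lets euclidean_distances
# reuse precomputed squared norms. The helper name and numbers are hypothetical;
# it assumes the module's existing numpy import (np).
def _euclidean_norm_squared_identity_sketch():
    rng = np.random.RandomState(0)
    x = rng.random_sample(4)
    y = rng.random_sample(4)
    lhs = np.sum((x - y) ** 2)
    rhs = np.sum(x ** 2) + np.sum(y ** 2) - 2 * np.dot(x, y)
    # Both sides agree up to floating point round-off.
    assert abs(lhs - rhs) < 1e-10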
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are the squared norms of the samples
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of an RBF kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they are returned as equal arrays.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turn a numpy array (of any dimensionality) into nested tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/mixture/tests/test_gmm.py | 44 | 20880 | # Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
import unittest
import copy
import sys
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_true, assert_greater,
assert_raise_message, assert_warns_message,
ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.gmm._sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.gmm._sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.gmm._sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
x = mixture.gmm._sample_gaussian(
[0, 0], [[4, 3], [1, .1]], covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# Compare a slow, naive implementation of lmvnpdf with the vectorized
# version (mixture.log_multivariate_normal_density) to check correctness.
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to raise a ``LinAlgError`` exception
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
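# Illustrative sketch (not part of the original tests): a standalone comparison
# of the two covariance update formulas discussed in the docstring of
# check_positive_definite_covars above. The helper name and the constants are
# hypothetical; it only shows that the centered form is the numerically safer
# of the two mathematically equivalent expressions.
def _covariance_update_formulas_sketch():
    local_rng = np.random.RandomState(1)
    X = local_rng.randn(500, 2) + 1e4      # a large offset stresses the naive form
    w = np.ones(len(X)) / len(X)           # uniform responsibilities summing to 1
    mu = w.dot(X)
    # Naive form: (sum_i w_i x_i x_i^T) - mu mu^T, prone to cancellation because
    # two large, nearly equal matrices are subtracted.
    naive = (w[:, np.newaxis] * X).T.dot(X) - np.outer(mu, mu)
    # Centered form: sum_i w_i (x_i - mu)(x_i - mu)^T, written as a Gram matrix
    # so it stays positive semi-definite up to round-off.
    Xc = np.sqrt(w)[:, np.newaxis] * (X - mu)
    centered = Xc.T.dot(Xc)
    # Return the smallest eigenvalue of each estimate for inspection.
    return np.linalg.eigvalsh(naive).min(), np.linalg.eigvalsh(centered).min()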
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
Kamp9/scipy | scipy/stats/_discrete_distns.py | 34 | 21220 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
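# Illustrative sketch (not part of the original module): a quick check that the
# pmf defined above matches the closed form from the class docstring,
# choose(n, k) * p**k * (1-p)**(n-k). The helper name and numbers are
# hypothetical; choose(5, 2) == 10 is computed by hand.
def _binom_pmf_example(n=5, p=0.3, k=2):
    closed_form = 10 * p ** k * (1 - p) ** (n - k)
    return binom.pmf(k, n, p), closed_form  # both are ~0.3087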
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all input args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
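# Illustrative sketch (not part of the original module): the pmf implemented
# above should agree with the closed form exp(-mu) * mu**k / k! quoted in the
# docstring. The helper name and numbers are hypothetical; 3! == 6.
def _poisson_pmf_example(mu=2.0, k=3):
    closed_form = exp(-mu) * mu ** k / 6.0
    return poisson.pmf(k, mu), closed_form  # both are ~0.1804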
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
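# Illustrative sketch (not part of the original module): for independent
# Poisson variables (rho = 0) the docstring above gives mu1 = lam1 and
# mu2 = lam2, so the moments computed in _stats reduce to mean = mu1 - mu2 and
# variance = mu1 + mu2. The helper name and numbers are hypothetical.
def _skellam_moments_example(mu1=3.0, mu2=1.5):
    mean, var = skellam.stats(mu1, mu2, moments='mv')
    return mean, var  # expected: (1.5, 4.5)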
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/Electrode_files/PINS_L303_floating_profile.py | 1 | 30501 | # -*- coding: utf-8 -*-
###
### This file is generated automatically by SALOME v8.3.0 with dump python functionality
###### Run with DBS_lead_position_V9.py
import sys
import salome
salome.salome_init()
theStudy = salome.myStudy
import salome_notebook
notebook = salome_notebook.NoteBook(theStudy)
###
### GEOM component
###
########################################### extra code 1 V10 15/12/18#############################################
###### This file runs with DBS_lead_position_V10.py
import os
sys.path.insert( 0, r'{}'.format(os.getcwd()))
sys.path.append('/usr/local/lib/python2.7/dist-packages')
#from pandas import read_csv
##### DEFAULT LIST #####
#Lead2nd_Enable = True
#Xt = 0
#Yt = 5
#Zt = 0
#X_2nd = 0
#Y_2nd = 5
#Z_2nd = 0
#OZ_angle = 0
#Xm = 0
#Ym = 0
#Zm = 0
#encap_thickness = 0.1
#ROI_radial = 13
#Vertice_enable = False
#Brain_map = '/home/trieu/electrode_dir/brain_elipse.brep'
#if(Lead2nd_Enable):
# Xt2 = 0
# Yt2 = -5
# Zt2 = 0
# OX_angle2 = 0
# OY_angle2 = 0
# OZ_angle2 = 0
##### VARIABLE LIST #####
########## End of variable list#############
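##### Illustrative example of the values injected between the two markers above
##### by the caller (an assumption of this note, not generated by SALOME). The
##### names mirror the commented default list; 'stretch' is added because the
##### contact geometry below uses it. Kept as comments so nothing here can
##### override the injected values.
#Xt, Yt, Zt = 0, 5, 0
#X_2nd, Y_2nd, Z_2nd = 0, 5, 0
#OZ_angle = 0
#Xm, Ym, Zm = 0, 0, 0
#encap_thickness = 0.1
#ROI_radial = 13
#stretch = 1.0
#Vertice_enable = False
#Brain_map = '/home/trieu/electrode_dir/brain_elipse.brep'
#Lead2nd_Enable = False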
if Z_2nd == Zt:
Z_2nd_artif = Zt+1.0 # just to ensure the rotation is possible
else:
Z_2nd_artif=Z_2nd
#for Lead-DBS, the tip point should be shifted down (they use the middle of the lowest contact as the reference point)
Zt_tip=Zt-3.0 # as for Medtronic3391
Vert_array =[0];
number_vertex = len(Vert_array)
Vert = []
VolumeObject1 = []
ContactObject1 = []
VolumeObject2 = []
ContactObject2 = []
print " DBS_lead's Geometry buid\n"
######################################### end of extra code 1 ########################################
######################################################################################################
from salome.geom import geomBuilder
import math
import SALOMEDS
geompy = geomBuilder.New(theStudy)
O = geompy.MakeVertex(0, 0, 0)
OX = geompy.MakeVectorDXDYDZ(1, 0, 0)
OY = geompy.MakeVectorDXDYDZ(0, 1, 0)
OZ = geompy.MakeVectorDXDYDZ(0, 0, 1)
Circle_1 = geompy.MakeCircle(O, OZ, 0.65)
Contact_1 = geompy.MakePrismVecH(Circle_1, OZ, 1.5*stretch+1.5)
geompy.TranslateDXDYDZ(Contact_1, 0, 0, 0.85)
Contact_1_full = geompy.MakePrismVecH(Circle_1, OZ, 3.0*stretch)
geompy.TranslateDXDYDZ(Contact_1_full, 0, 0, 0.85)
Contact_2 = geompy.MakeTranslation(Contact_1_full, 0, 0, 1.5+4.5*stretch)
Contact_3 = geompy.MakeTranslation(Contact_1_full, 0, 0, 1.5+10.5*stretch)
Contact_4 = geompy.MakeTranslation(Contact_1, 0, 0, 1.5+16.5*stretch)
Cylinder_1 = geompy.MakeCylinderRH(0.65, 149.365)
Sphere_1 = geompy.MakeSphereR(0.65)
Fuse_1 = geompy.MakeFuseList([Cylinder_1, Sphere_1], True, True)
Cylinder_2 = geompy.MakeCylinderRH(encap_thickness+0.65, 149.365)
Sphere_2 = geompy.MakeSphereR(encap_thickness+0.65)
Fuse_2 = geompy.MakeFuseList([Cylinder_2, Sphere_2], True, True)
encap_layer = geompy.MakeCutList(Fuse_2, [Fuse_1], True)
geompy.TranslateDXDYDZ(Circle_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_3, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_4, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Cylinder_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Sphere_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Fuse_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Cylinder_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Sphere_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Fuse_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(encap_layer, 0, 0, 0.65)
Sphere_ROI = geompy.MakeSphereR(ROI_radial)
encap_outer_ROI = geompy.MakeCutList(encap_layer, [Sphere_ROI], True)
encap_inner_ROI = geompy.MakeCutList(encap_layer, [encap_outer_ROI], True)
Fuse_all_lead_encap_ROI = geompy.MakeFuseList([Sphere_ROI, Fuse_2], True, True)
ROI = geompy.MakeCutList(Sphere_ROI, [Fuse_2], True)
CV1 = geompy.MakeCylinderRH(0.65, 1.5+1.5*stretch)
geompy.TranslateDXDYDZ(CV1, 0, 0, 1.5)
CV1_full = geompy.MakeCylinderRH(0.65, 3.0*stretch)
geompy.TranslateDXDYDZ(CV1_full, 0, 0, 1.5)
CV2 = geompy.MakeTranslation(CV1_full, 0, 0, 1.5+4.5*stretch)
CV3 = geompy.MakeTranslation(CV1_full, 0, 0, 1.5+10.5*stretch)
CV4 = geompy.MakeTranslation(CV1, 0, 0, 1.5+16.5*stretch)
##################################################################################################################
########################################### extra code 2 V10 15/12/18#############################################
print " Load brain image \n"
if (Brain_map[-4:] == 'brep'):
brain_solid = geompy.ImportBREP( Brain_map )
elif (Brain_map[-4:] == 'step'):
brain_solid = geompy.ImportSTEP( Brain_map )
elif (Brain_map[-4:] == 'iges'):
brain_solid = geompy.ImportIGES( Brain_map )
elif (Brain_map[-4:] == '.stl'):
brain_solid = geompy.ImportSTL( Brain_map )
else:
print " unknow imported file format"
Fuse_all_lead_encap_ROI_no_internal_face = geompy.RemoveInternalFaces(Fuse_all_lead_encap_ROI)
#################################################### Geometry and extra code interface ##############################################################
VolumeObject1 = [ encap_outer_ROI,ROI,encap_inner_ROI,CV1,CV2,CV3,CV4] # Declare objects included to partition, encap_outer_ROI always @1st position
Volume_name1 = ['encap_outer_ROI1','ROI1','encap_inner_ROI1','CV1_1','CV1_2','CV1_3','CV1_4'] # Declare name of the group in the partition for volume
ContactObject1 = [Contact_1,Contact_2,Contact_3,Contact_4]
Contact_name1 = ['Contact1_1','Contact1_2','Contact1_3','Contact1_4']
if(Lead2nd_Enable): ################## 2nd LEAD ###############################################
VolumeObject2 = [ROI]*len(VolumeObject1)
ContactObject2 = [Contact_1]*len(ContactObject1)
Volume_name2 = [ 'encap_outer_ROI2','ROI2','encap_inner_ROI2','CV2_1','CV2_2','CV2_3','CV2_4']
Contact_name2 = ['Contact2_1','Contact2_2','Contact2_3','Contact2_4']
##############################################################################################################################################
print "Position 2nd Fuse all object at [{},{},{}], [{}',{}',{}']\n".format(Xt2,Yt2,Zt2,OX_angle2,OY_angle2,OZ_angle2)
Fuse_all_lead_encap_ROI_no_internal_face2 = geompy.MakeTranslation(Fuse_all_lead_encap_ROI_no_internal_face,Xt2,Yt2,Zt2)
OX2 = geompy.MakeTranslation(OX,Xt2,Yt2,Zt2)
OY2 = geompy.MakeTranslation(OY,Xt2,Yt2,Zt2)
OZ2 = geompy.MakeTranslation(OZ,Xt2,Yt2,Zt2)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OZ2,OZ_angle2*math.pi/180.0)
print "Position 2nd Lead at [{},{},{}], [{}',{}',{}']\n".format(Xt2,Yt2,Zt2,OX_angle2,OY_angle2,OZ_angle2)
for i in range(0,len(VolumeObject1)):
VolumeObject2[i] = geompy.MakeTranslation(VolumeObject1[i],Xt2,Yt2,Zt2)
geompy.Rotate(VolumeObject2[i], OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(VolumeObject2[i], OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(VolumeObject2[i], OZ2,OZ_angle2*math.pi/180.0)
for i in range(0,len(ContactObject1)):
ContactObject2[i] = geompy.MakeTranslation(ContactObject1[i],Xt2,Yt2,Zt2)
geompy.Rotate(ContactObject2[i], OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(ContactObject2[i], OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(ContactObject2[i], OZ2,OZ_angle2*math.pi/180.0)
print "Cut outer ROI2 with brain\n"
cut_outer_ROI = geompy.MakeCutList(VolumeObject2[0], [brain_solid], True)
VolumeObject2[0] = geompy.MakeCutList(VolumeObject2[0], [cut_outer_ROI], True)
print "Cut ROI2 with brain\n"
VolumeObject2[1] = geompy.MakeCommonList([VolumeObject2[1], brain_solid], True)
print "Group 2nd:volume and area extraction for group ID identification process\n"
Volume2_Pro = [geompy.BasicProperties( VolumeObject2[0])]*len(VolumeObject2)
Contact2_Pro = [geompy.BasicProperties( ContactObject2[0])]*len(ContactObject2)
for i in range(0,len(VolumeObject2)):
Volume2_Pro[i] = geompy.BasicProperties( VolumeObject2[i])
for i in range(0,len(ContactObject2)):
Contact2_Pro[i] = geompy.BasicProperties( ContactObject2[i])
################## LEAD 1st #############################################################
#print "Position 1st Fuse all object at [{},{},{}], [{}',{}',{}']\n".format(Xt,Yt,Zt,OX_angle,OY_angle,OZ_angle)
geompy.TranslateDXDYDZ(Fuse_all_lead_encap_ROI_no_internal_face,Xt,Yt,Zt_tip)
OX1 = geompy.MakeTranslation(OX,Xt,Yt,Zt_tip)
OY1 = geompy.MakeTranslation(OY,Xt,Yt,Zt_tip)
OZ1 = geompy.MakeTranslation(OZ,Xt,Yt,Zt_tip)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face, OZ1,OZ_angle*math.pi/180.0)
Vertex_1 = geompy.MakeVertex(X_2nd,Y_2nd,Z_2nd)
Vertex_O = geompy.MakeVertex(Xt,Yt,Zt)
Vertex_3 = geompy.MakeVertex(Xt,Yt,Z_2nd_artif)
if X_2nd!=Xt or Y_2nd!=Yt:
Fuse_all_lead_encap_ROI_no_internal_face=geompy.MakeRotationThreePoints(Fuse_all_lead_encap_ROI_no_internal_face, Vertex_O, Vertex_3, Vertex_1)
#print "Position 1st Lead at [{},{},{}], [{}',{}',{}']\n".format(Xt,Yt,Zt,OX_angle,OY_angle,OZ_angle)
for i in range(0,len(VolumeObject1)):
geompy.TranslateDXDYDZ(VolumeObject1[i],Xt,Yt,Zt_tip)
geompy.Rotate(VolumeObject1[i], OZ1,OZ_angle*math.pi/180.0)
if X_2nd!=Xt or Y_2nd!=Yt:
VolumeObject1[i]=geompy.MakeRotationThreePoints(VolumeObject1[i], Vertex_O, Vertex_3, Vertex_1)
for i in range(0,len(ContactObject1)):
geompy.TranslateDXDYDZ(ContactObject1[i],Xt,Yt,Zt_tip)
geompy.Rotate(ContactObject1[i], OZ1,OZ_angle*math.pi/180.0)
if X_2nd!=Xt or Y_2nd!=Yt:
ContactObject1[i]=geompy.MakeRotationThreePoints(ContactObject1[i], Vertex_O, Vertex_3, Vertex_1)
print "Cut outer ROI1 with brain\n"
cut_outer_ROI = geompy.MakeCutList(VolumeObject1[0], [brain_solid], True)
VolumeObject1[0] = geompy.MakeCutList(VolumeObject1[0], [cut_outer_ROI], True)
print "Cut ROI1 with brain\n"
VolumeObject1[1] = geompy.MakeCommonList([VolumeObject1[1], brain_solid], True)
print "Group 1st:volume and area extraction for group ID identification process\n"
Volume1_Pro = [geompy.BasicProperties( VolumeObject1[0])]*len(VolumeObject1)
Contact1_Pro = [geompy.BasicProperties( ContactObject1[0])]*len(ContactObject1)
for i in range(0,len(VolumeObject1)):
Volume1_Pro[i] = geompy.BasicProperties( VolumeObject1[i])
for i in range(0,len(ContactObject1)):
Contact1_Pro[i] = geompy.BasicProperties( ContactObject1[i])
print "Create reference groups for ID identification process\n"
if(Lead2nd_Enable):
Rest = geompy.MakeCutList(brain_solid, [Fuse_all_lead_encap_ROI_no_internal_face,Fuse_all_lead_encap_ROI_no_internal_face2], True)
Partition_profile = geompy.MakePartition(VolumeObject1+VolumeObject2+ContactObject1+ContactObject2, [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
###reference_volume
reference_volume = VolumeObject1 + VolumeObject2
reference_volume_Pro = Volume1_Pro + Volume2_Pro
Volume_name = Volume_name1+Volume_name2
### reference_area
reference_surface = ContactObject1 + ContactObject2
reference_surface_Pro = Contact1_Pro + Contact2_Pro
Contact_name = Contact_name1+Contact_name2
Group_volume = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])] * (len(VolumeObject1)+len(VolumeObject2)+1) # +1 is Rest Group
Group_surface = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])] * (len(ContactObject1)+len(ContactObject2))
else:
Rest = geompy.MakeCutList(brain_solid, [Fuse_all_lead_encap_ROI_no_internal_face], True)
Partition_profile = geompy.MakePartition(VolumeObject1+ContactObject1, [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
###reference_volume
reference_volume = VolumeObject1
reference_volume_Pro = Volume1_Pro
Volume_name = Volume_name1
### reference_area
reference_surface = ContactObject1
reference_surface_Pro = Contact1_Pro
Contact_name = Contact_name1
Group_volume = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])] * (len(VolumeObject1)+1) # +1 is Rest Group
Group_surface = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])] * len(ContactObject1)
### find out subshape and subshape ID
Group_surface_ListIDs =[]
Group_volume_ListIDs =[]
Group_partition_volume = []
Group_partition_surface = []
### find group volume ID ######################################################################
Partition_volume_IDsList = geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["SOLID"]) # list all sub shape volume in Partition
print "Partition_volume_IDsList",Partition_volume_IDsList, '\n'
for ref_ind in range (0, len(reference_volume)):
temp_volume = []
for sub_ind in range (0, len (Partition_volume_IDsList)):
subshape = geompy.GetSubShape(Partition_profile, [Partition_volume_IDsList[sub_ind]]) # get subshape
subshape_Pro = geompy.BasicProperties(subshape) # extract volume of subshape
Common_volume = geompy.MakeCommonList([subshape, reference_volume[ref_ind]], True) # check common intersection
Common_volume_Pro = geompy.BasicProperties(Common_volume)
print "volume difference",abs(Common_volume_Pro[2]-subshape_Pro[2]),"/",abs(Common_volume_Pro[2]-reference_volume_Pro[ref_ind][2])
# if ( common volume = subshape) and (common volume = ref volume) => ref volume = sub shape
if (abs(Common_volume_Pro[2]-subshape_Pro[2])< 0.0003) and (abs(Common_volume_Pro[2]-reference_volume_Pro[ref_ind][2])<0.0003):
Group_partition_volume.append([Volume_name[ref_ind],Partition_volume_IDsList[sub_ind]])
# if ( common volume = subshape) and (common volume < ref volume) => sub shape belong to ref volume
elif (abs(Common_volume_Pro[2]-subshape_Pro[2])< 0.0003) and ((Common_volume_Pro[2] - reference_volume_Pro[ref_ind][2])<-0.0003):
temp_volume.append( Partition_volume_IDsList[sub_ind] )
    if len(temp_volume) >1 : # the volume is divided
Group_partition_volume.append([Volume_name[ref_ind],temp_volume ])
        print Volume_name[ref_ind]," is divided and has sub IDs:{}\n".format(temp_volume)
if len(reference_volume) != len(Group_partition_volume):
print "Geometry-volume error please check ROI diameter and DBS lead Position ",len(reference_volume),len(Group_partition_volume)
print 'Group_partition_volume',Group_partition_volume,'\n'
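# Worked illustration of the matching rule above (comments only; the numbers
# are hypothetical and chosen purely for exposition):
#   reference volume V_ref = 12.0, partition subshape V_sub = 5.0,
#   intersection V_common = 5.0
#   -> |V_common - V_sub| = 0 < 0.0003, but V_common - V_ref = -7.0 < -0.0003,
#      so the subshape is recorded as one piece of a divided reference volume.
#   If instead V_common, V_sub and V_ref all agree within 0.0003, the subshape
#   and the reference volume are treated as identical and share one group entry.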
### find group surface ID ######################################################################
Partition_surface_IDsList = geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["FACE"]) # list all sub shape face in Partition
print 'Partition_surface_IDsList',Partition_surface_IDsList,'\n'
sub_face = [] ## store devided faces
for reff_ind in range (0, len (reference_surface)):
temp_surface = []
for subf_ind in range (0, len(Partition_surface_IDsList)):
subshapef = geompy.GetSubShape(Partition_profile, [Partition_surface_IDsList[subf_ind]]) # get subshape
Common_face = geompy.MakeCommonList([subshapef, reference_surface[reff_ind]], True) # check common intersection
Common_face_Pro = geompy.BasicProperties(Common_face)
subshapef_Pro = geompy.BasicProperties(subshapef) # extract volume of subshape
print "area difference",abs(Common_face_Pro[1]-subshapef_Pro[1]),"/",abs(Common_face_Pro[1]-reference_surface_Pro[reff_ind][1])
# if ( common face = subface) and (common face = ref face) => ref face = sub face
if (abs(Common_face_Pro[1]-subshapef_Pro[1])<0.000001 )and (abs(Common_face_Pro[1]-reference_surface_Pro[reff_ind][1])<0.000001):
Group_partition_surface.append([ Contact_name[reff_ind],Partition_surface_IDsList[subf_ind] ])
# if ( common face = subface) and (common face < ref face) => sub face belong to ref face
elif (abs(Common_face_Pro[1]-subshapef_Pro[1])<0.000001 ) and ((Common_face_Pro[1] - reference_surface_Pro[reff_ind][1])<-0.000001):
temp_surface.append(Partition_surface_IDsList[subf_ind])
    if len(temp_surface) >1 : # the face is divided
Group_partition_surface.append( [Contact_name[reff_ind],temp_surface ])
        print Contact_name[reff_ind]," is divided and has sub IDs:{}\n".format(temp_surface)
if len(reference_surface) != len(Group_partition_surface): #+len(Group_partition_Multi_surface):
print "Geometry-Surface error please check ROI diameter and DBS lead Position ",len(reference_surface),len(Group_partition_surface),'\n'
print 'Group_partition_surface',Group_partition_surface,'\n'
if(Lead2nd_Enable):
Partition_profile = geompy.MakePartition(VolumeObject1+VolumeObject2+ContactObject1+ContactObject2+[Rest], [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
else:
Partition_profile = geompy.MakePartition(VolumeObject1+ContactObject1+[Rest], [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
new_volume_ID= geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["SOLID"])
ID= list(set(Partition_volume_IDsList) ^ set (new_volume_ID))
Group_partition_volume.append(['Rest_1',ID[0]])
print "REST ID:",ID
print 'Group_partition_volume',Group_partition_volume,'\n'
print"Create volume and surface group under partition_profile\n"
for i_solid in range (0,len (Group_partition_volume)):
Group_volume[i_solid] = geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])
if (isinstance (Group_partition_volume[i_solid][1],list) == False):
geompy.UnionIDs(Group_volume[i_solid], [Group_partition_volume[i_solid][1]])
if (isinstance (Group_partition_volume[i_solid][1],list) == True):
geompy.UnionIDs(Group_volume[i_solid], Group_partition_volume[i_solid][1])
#############################################
for i_surface in range (0,len (Group_partition_surface)):
Group_surface[i_surface] = geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])
if (isinstance (Group_partition_surface[i_surface][1],list) == False): # not a list
geompy.UnionIDs(Group_surface[i_surface], [Group_partition_surface[i_surface][1]])
if (isinstance (Group_partition_surface[i_surface][1],list) == True): # it is a list
geompy.UnionIDs(Group_surface[i_surface], Group_partition_surface[i_surface][1])
print "Translate whole partition to Xm,Ym,Zm\n"
geompy.TranslateDXDYDZ(Partition_profile, Xm, Ym, Zm)
### add Vertices to geometry
if(Vertice_enable):
for ver_ind in range (0,number_vertex):
print"Add vertices to model\n"
Vert.append(geompy.MakeVertex(Vert_array[ver_ind][0],Vert_array[ver_ind][1],Vert_array[ver_ind][2]))
geompy.TranslateDXDYDZ(Vert[ver_ind], Xm, Ym, Zm) ###Translate vertices to Xm,Ym,Zm
geompy.addToStudy( Vert[ver_ind], 'Vert_{}'.format(ver_ind))
print"add to study\n"
############################################ end of extra code 2 ############################################
#############################################################################################################
geompy.addToStudy( O, 'O' )
geompy.addToStudy( OX, 'OX' )
geompy.addToStudy( OY, 'OY' )
geompy.addToStudy( OZ, 'OZ' )
#geompy.addToStudy( Circle_1, 'Circle_1' )
geompy.addToStudy( Contact_1, 'Contact_1' )
geompy.addToStudy( Contact_2, 'Contact_2' )
geompy.addToStudy( Contact_3, 'Contact_3' )
geompy.addToStudy( Contact_4, 'Contact_4' )
geompy.addToStudy( CV1, 'CV1' )
geompy.addToStudy( CV2, 'CV2' )
geompy.addToStudy( CV3, 'CV3' )
geompy.addToStudy( CV4, 'CV4' )
#geompy.addToStudy( Cylinder_1, 'Cylinder_1' )
#geompy.addToStudy( Sphere_1, 'Sphere_1' )
#geompy.addToStudy( Fuse_1, 'Fuse_1' )
#geompy.addToStudy( Cylinder_2, 'Cylinder_2' )
#geompy.addToStudy( Sphere_2, 'Sphere_2' )
#geompy.addToStudy( Fuse_2, 'Fuse_2' )
#geompy.addToStudy( encap_layer, 'encap_layer' )
#geompy.addToStudy( Sphere_ROI, 'Sphere_ROI' )
geompy.addToStudy( ROI, 'ROI' )
geompy.addToStudy( encap_outer_ROI, 'encap_outer_ROI' )
geompy.addToStudy( encap_inner_ROI, 'encap_inner_ROI' )
geompy.addToStudy( Fuse_all_lead_encap_ROI, 'Fuse_all_lead_encap_ROI' )
################################################################################################################
####################################### extra code 3 V10 15/12/18##############################################/
#for i in range(0,len(VolumeObject2)):/
# geompy.addToStudy( VolumeObject2[i], 'VolumeObject2_{}'.format(i) )
#for i in range(0,len(ContactObject2)):
# geompy.addToStudy( ContactObject2[i], 'ContactObject2_{}'.format(i) )
#for i in range(0,len(VolumeObject1)):
# geompy.addToStudy( VolumeObject1[i], 'VolumeObject1_{}'.format(i) )
#for i in range(0,len(ContactObject1)):
# geompy.addToStudy( ContactObject1[i], 'ContactObject1_{}'.format(i) )
geompy.addToStudy( Partition_profile, 'Partition_profile' )
for i_solid1 in range (0,len (Group_partition_volume)):
geompy.addToStudyInFather( Partition_profile, Group_volume [i_solid1], Group_partition_volume[i_solid1][0])
for i_surface1 in range (0,len (Group_partition_surface)):
geompy.addToStudyInFather( Partition_profile, Group_surface [i_surface1], Group_partition_surface[i_surface1][0])
##################################### end of extra code 3##########################################
###################################################################################################
Contact1_1=Group_surface[0]
Contact1_2=Group_surface[1]
Contact1_3=Group_surface[2]
Contact1_4=Group_surface[3]
encap_inner_ROI1=Group_volume[2]
encap_outer_ROI1=Group_volume[0]
ROI1=Group_volume[1]
Rest_1=Group_volume[7]
Floating_contacts=[]
float_indices=[]
for i in xrange(len(Phi_vector)):
Floating_contacts.append(Group_volume[i+3]) #because the first contact is Group_volume[3]
float_indices.append(i+3)
Auto_group_for_floating = geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])
geompy.UnionList(Auto_group_for_floating, Floating_contacts[:])
geompy.addToStudyInFather( Partition_profile, Auto_group_for_floating, 'Auto_group_for_floating' )
###
### SMESH component
###
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(theStudy)
Mesh_1 = smesh.Mesh(Partition_profile)
NETGEN_1D_2D_3D = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D)
NETGEN_3D_Parameters_1 = NETGEN_1D_2D_3D.Parameters()
NETGEN_3D_Parameters_1.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_1.SetSecondOrder( 0 )
NETGEN_3D_Parameters_1.SetOptimize( 1 )
NETGEN_3D_Parameters_1.SetFineness( 0 )
NETGEN_3D_Parameters_1.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_1.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_1.SetFuseEdges( 1 )
NETGEN_3D_Parameters_1.SetQuadAllowed( 0 )
NETGEN_1D_2D = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_1)
Sub_mesh_1 = NETGEN_1D_2D.GetSubMesh()
NETGEN_2D_Parameters_1 = NETGEN_1D_2D.Parameters()
NETGEN_2D_Parameters_1.SetMaxSize( 0.05 )
NETGEN_2D_Parameters_1.SetSecondOrder( 0 )
NETGEN_2D_Parameters_1.SetOptimize( 1 )
NETGEN_2D_Parameters_1.SetFineness( 4 )
NETGEN_2D_Parameters_1.SetMinSize( 0.0001 )
NETGEN_2D_Parameters_1.SetUseSurfaceCurvature( 1 )
NETGEN_2D_Parameters_1.SetFuseEdges( 1 )
NETGEN_2D_Parameters_1.SetQuadAllowed( 0 )
NETGEN_1D_2D_1 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_2)
Sub_mesh_2 = NETGEN_1D_2D_1.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_2)
NETGEN_1D_2D_2 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_3)
Sub_mesh_3 = NETGEN_1D_2D_2.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_3)
NETGEN_1D_2D_3 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_4)
Sub_mesh_4 = NETGEN_1D_2D_3.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_4)
NETGEN_1D_2D_3D_1 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=encap_inner_ROI1)
Sub_mesh_5 = NETGEN_1D_2D_3D_1.GetSubMesh()
NETGEN_3D_Parameters_2 = NETGEN_1D_2D_3D_1.Parameters()
NETGEN_3D_Parameters_2.SetMaxSize( encap_thickness )
NETGEN_3D_Parameters_2.SetSecondOrder( 0 )
NETGEN_3D_Parameters_2.SetOptimize( 1 )
NETGEN_3D_Parameters_2.SetFineness( 2 )
NETGEN_3D_Parameters_2.SetMinSize( 0.00283583 )
NETGEN_3D_Parameters_2.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_2.SetFuseEdges( 1 )
NETGEN_3D_Parameters_2.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5 ] ])
NETGEN_1D_2D_3D_2 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=encap_outer_ROI1)
Sub_mesh_6 = NETGEN_1D_2D_3D_2.GetSubMesh()
NETGEN_3D_Parameters_3 = NETGEN_1D_2D_3D_2.Parameters()
NETGEN_3D_Parameters_3.SetMaxSize( encap_thickness )
NETGEN_3D_Parameters_3.SetSecondOrder( 0 )
NETGEN_3D_Parameters_3.SetOptimize( 1 )
NETGEN_3D_Parameters_3.SetFineness( 2 )
NETGEN_3D_Parameters_3.SetMinSize( 0.0333798 )
NETGEN_3D_Parameters_3.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_3.SetFuseEdges( 1 )
NETGEN_3D_Parameters_3.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5, Sub_mesh_6 ] ])
NETGEN_1D_2D_3D_3 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=ROI1)
Sub_mesh_7 = NETGEN_1D_2D_3D_3.GetSubMesh()
NETGEN_3D_Parameters_4 = NETGEN_1D_2D_3D_3.Parameters()
NETGEN_3D_Parameters_4.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_4.SetSecondOrder( 0 )
NETGEN_3D_Parameters_4.SetOptimize( 1 )
NETGEN_3D_Parameters_4.SetFineness( 2 )
NETGEN_3D_Parameters_4.SetMinSize( 0.00328242 )
NETGEN_3D_Parameters_4.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_4.SetFuseEdges( 1 )
NETGEN_3D_Parameters_4.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5, Sub_mesh_6, Sub_mesh_7 ] ])
NETGEN_1D_2D_3D_4 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Rest_1)
Sub_mesh_8 = NETGEN_1D_2D_3D_4.GetSubMesh()
NETGEN_3D_Parameters_5 = NETGEN_1D_2D_3D_4.Parameters()
NETGEN_3D_Parameters_5.SetMaxSize( 2.5 )
NETGEN_3D_Parameters_5.SetSecondOrder( 0 )
NETGEN_3D_Parameters_5.SetOptimize( 1 )
NETGEN_3D_Parameters_5.SetFineness( 2 )
NETGEN_3D_Parameters_5.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_5.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_5.SetFuseEdges( 1 )
NETGEN_3D_Parameters_5.SetQuadAllowed( 0 )
NETGEN_1D_2D_3D_5 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Group_volume[3])
Sub_mesh_9 = NETGEN_1D_2D_3D_5.GetSubMesh()
NETGEN_3D_Parameters_6 = NETGEN_1D_2D_3D_5.Parameters()
NETGEN_3D_Parameters_6.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_6.SetSecondOrder( 0 )
NETGEN_3D_Parameters_6.SetOptimize( 1 )
NETGEN_3D_Parameters_6.SetFineness( 2 )
NETGEN_3D_Parameters_6.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_6.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_6.SetFuseEdges( 1 )
NETGEN_1D_2D_3D_6 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Group_volume[4])
Sub_mesh_10 = NETGEN_1D_2D_3D_6.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_3D_Parameters_6,Group_volume[4])
NETGEN_1D_2D_3D_7 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Group_volume[5])
Sub_mesh_11 = NETGEN_1D_2D_3D_7.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_3D_Parameters_6,Group_volume[5])
NETGEN_1D_2D_3D_8 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Group_volume[6])
Sub_mesh_12 = NETGEN_1D_2D_3D_8.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_3D_Parameters_6,Group_volume[6])
NETGEN_3D_Parameters_6.SetQuadAllowed( 0 )
#isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5,Sub_mesh_9,Sub_mesh_6, Sub_mesh_7, Sub_mesh_8 ] ])
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5,Sub_mesh_9,Sub_mesh_10,Sub_mesh_11,Sub_mesh_12,Sub_mesh_6, Sub_mesh_7, Sub_mesh_8 ] ])
#if Phi_vector[0]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_1 )
#if Phi_vector[1]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_2 )
#if Phi_vector[2]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_3 )
#if Phi_vector[3]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_4 )
if Phi_vector[0]==0.0:
Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_9 )
if Phi_vector[1]==0.0:
Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_10 )
if Phi_vector[2]==0.0:
Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_11 )
if Phi_vector[3]==0.0:
Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_12 )
isDone = Mesh_1.Compute()
if Phi_vector[0]!=0.0:
Mesh_1.GroupOnGeom(Group_volume[3],'Flt_cnt1',SMESH.VOLUME)
if Phi_vector[1]!=0.0:
Mesh_1.GroupOnGeom(Group_volume[4],'Flt_cnt2',SMESH.VOLUME)
if Phi_vector[2]!=0.0:
Mesh_1.GroupOnGeom(Group_volume[5],'Flt_cnt3',SMESH.VOLUME)
if Phi_vector[3]!=0.0:
Mesh_1.GroupOnGeom(Group_volume[6],'Flt_cnt4',SMESH.VOLUME)
if Phi_vector[0]!=None:
Mesh_1.GroupOnGeom(Contact1_1,'C1_1',SMESH.FACE)
if Phi_vector[1]!=None:
Mesh_1.GroupOnGeom(Contact1_2,'C1_2',SMESH.FACE)
if Phi_vector[2]!=None:
Mesh_1.GroupOnGeom(Contact1_3,'C1_3',SMESH.FACE)
if Phi_vector[3]!=None:
Mesh_1.GroupOnGeom(Contact1_4,'C1_4',SMESH.FACE)
Encap_contact = Mesh_1.GroupOnGeom(encap_inner_ROI1,'Encap_contact',SMESH.VOLUME)
Encap_rest = Mesh_1.GroupOnGeom(encap_outer_ROI1,'Encap_rest',SMESH.VOLUME)
RegOfInt = Mesh_1.GroupOnGeom(ROI1,'RegOfInt',SMESH.VOLUME)
Rst = Mesh_1.GroupOnGeom(Rest_1,'Rst',SMESH.VOLUME)
## Set names of Mesh objects
smesh.SetName(NETGEN_1D_2D_3D.GetAlgorithm(), 'NETGEN 1D-2D-3D')
smesh.SetName(NETGEN_1D_2D.GetAlgorithm(), 'NETGEN 1D-2D')
smesh.SetName(NETGEN_2D_Parameters_1, 'NETGEN 2D Parameters_1')
smesh.SetName(NETGEN_3D_Parameters_2, 'NETGEN 3D Parameters_2')
smesh.SetName(NETGEN_3D_Parameters_1, 'NETGEN 3D Parameters_1')
smesh.SetName(NETGEN_3D_Parameters_5, 'NETGEN 3D Parameters_5')
smesh.SetName(NETGEN_3D_Parameters_6, 'NETGEN 3D Parameters_6')
smesh.SetName(NETGEN_3D_Parameters_3, 'NETGEN 3D Parameters_3')
smesh.SetName(NETGEN_3D_Parameters_4, 'NETGEN 3D Parameters_4')
smesh.SetName(Sub_mesh_4, 'Sub-mesh_4')
smesh.SetName(Sub_mesh_1, 'Sub-mesh_1')
smesh.SetName(Sub_mesh_3, 'Sub-mesh_3')
smesh.SetName(Sub_mesh_2, 'Sub-mesh_2')
smesh.SetName(Mesh_1.GetMesh(), 'Mesh_1')
smesh.SetName(Rst, 'Rst')
smesh.SetName(Sub_mesh_12, 'Sub_mesh_12')
smesh.SetName(Sub_mesh_11, 'Sub_mesh_11')
smesh.SetName(Sub_mesh_10, 'Sub_mesh_10')
smesh.SetName(RegOfInt, 'RegOfInt')
smesh.SetName(Encap_rest, 'Encap_rest')
smesh.SetName(Encap_contact, 'Encap_contact')
smesh.SetName(Sub_mesh_7, 'Sub-mesh_7')
smesh.SetName(Sub_mesh_6, 'Sub-mesh_6')
smesh.SetName(Sub_mesh_5, 'Sub-mesh_5')
smesh.SetName(Sub_mesh_8, 'Sub-mesh_8')
smesh.SetName(Sub_mesh_9, 'Sub-mesh_9')
#if Phi_vector[0]!=None:
#
# smesh.SetName(C1_1, 'C1_1')
#if Phi_vector[1]!=None:
#
# smesh.SetName(C1_2, 'C1_2')
#if Phi_vector[2]!=None:
#
# smesh.SetName(C1_3, 'C1_3')
#if Phi_vector[3]!=None:
#
# smesh.SetName(C1_4, 'C1_4')
Mesh_1.ExportMED(os.environ['PATIENTDIR']+'/Meshes/Mesh_unref.med')
#if salome.sg.hasDesktop():
# salome.sg.updateObjBrowser(True)
import killSalome
killSalome.killAllPorts()
| gpl-3.0 |
rafaeltg/Deep-Learning-Algorithms | pydl/ts/stats.py | 2 | 4655 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.tsa.stattools as stools
from statsmodels.tsa.seasonal import seasonal_decompose
__all__ = ['acf', 'pacf', 'test_stationarity', 'decompose', 'correlated_lags']
def acf(ts, nlags=20, plot=False, ax=None):
"""
Autocorrelation function
:param ts: time series
:param nlags: number of lags to calculate the acf function
:param plot: whether to plot the acf or not
:param ax: custom plot axes
:return:
- acf value for each lag
- confidence level value
- plotted ax (if 'plot' is true)
"""
lag_acf = stools.acf(ts, nlags=nlags)
conf_level = 1.96/np.sqrt(len(ts))
if plot:
if ax is None:
ax = plt.gca(xlim=(1, nlags), ylim=(-1.0, 1.0))
ax.plot(lag_acf)
ax.axhline(y=-conf_level, linestyle='--', color='gray')
ax.axhline(y=conf_level, linestyle='--', color='gray')
ax.set_title('Autocorrelation Function')
ax.set_xlabel('Lags')
ax.set_ylabel('ACF')
return lag_acf, conf_level, ax
return lag_acf.tolist(), conf_level
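# --- Illustrative sketch (added for exposition; not part of the original
# module). It builds a synthetic AR(1)-like series -- an arbitrary assumption
# made only for demonstration -- and keeps the lags whose autocorrelation
# exceeds the confidence level returned by acf().
def _acf_usage_example(n=500, coeff=0.7, nlags=20):
    rng = np.random.RandomState(0)
    ts = np.zeros(n)
    for t in range(1, n):
        ts[t] = coeff * ts[t - 1] + rng.randn()
    lag_acf, conf_level = acf(ts, nlags=nlags)
    significant = [lag for lag, val in enumerate(lag_acf) if abs(val) > conf_level]
    return significant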
def pacf(ts, nlags=20, method='ols', alpha=None, plot=False, ax=None):
"""
Partial autocorrelation function
:param ts: time series
    :param nlags: number of lags to calculate the pacf function
    :param method: estimation method passed to statsmodels' pacf (default 'ols')
    :param alpha: if set (e.g. 0.05), confidence intervals at that level are
        computed and returned as well
    :param plot: whether to plot the pacf or not
    :param ax: custom plot axes
    :return:
        - pacf value for each lag
        - confidence intervals (only if 'alpha' is set)
        - plotted ax (only if 'plot' is true)
"""
if alpha is not None:
lag_pacf, confint = stools.pacf(ts, nlags=nlags, method=method, alpha=alpha)
else:
lag_pacf = stools.pacf(ts, nlags=nlags, method=method)
if plot:
if ax is None:
ax = plt.gca(xlim=(1, nlags), ylim=(-1.0, 1.0))
ax.plot(lag_pacf)
ax.axhline(y=0, linestyle='--', color='gray')
ax.set_title('Partial Autocorrelation Function')
ax.set_xlabel('Lags')
ax.set_ylabel('PACF')
if alpha is not None:
ax.plot(confint[:, 0], linestyle='--', color='red')
ax.plot(confint[:, 1], linestyle='--', color='red')
return lag_pacf, confint, ax
else:
conf_level = 1.96/np.sqrt(len(ts))
ax.axhline(y=-conf_level, linestyle='--', color='gray')
ax.axhline(y=conf_level, linestyle='--', color='gray')
return lag_pacf, ax
if alpha:
return lag_pacf, confint
else:
return lag_pacf
def test_stationarity(ts, to_file='', log_result=False):
"""
    Perform the Augmented Dickey-Fuller unit-root test and return its summary
    (test statistic, p-value, lags used and critical values) as a pandas Series
"""
if isinstance(ts, np.ndarray):
ts = ts.flatten()
dftest = stools.adfuller(ts, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for k, v in dftest[4].items():
dfoutput['Critical Value (%s)' % k] = v
if to_file != '':
dfoutput.to_csv(to_file)
elif log_result:
print('Results of Dickey-Fuller Test:')
print(dfoutput)
return dfoutput
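# --- Illustrative sketch (added for exposition; not part of the original
# module). Reading the ADF output: a p-value below 0.05 is commonly taken as
# evidence against a unit root, i.e. the series is treated as stationary; the
# 0.05 threshold is a conventional choice, not something fixed by this module.
def _adf_is_stationary(ts, p_threshold=0.05):
    result = test_stationarity(ts)
    return bool(result['p-value'] < p_threshold)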
def decompose(ts, plot=False, axes=None):
"""
Seasonal decomposition (Trend + Seasonality + Residual)
:param ts: time series
:param plot: whether to plot the seasonal components or not
:param axes: custom list of plot axes
    :return:
        - trend, seasonal and residual components
        - plotted axes (only if 'plot' is true)
"""
decomposition = seasonal_decompose(ts, freq=7)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
if plot:
if axes is None or not isinstance(axes, np.ndarray):
_, axes = plt.subplots(4, 1, sharex=True)
axes[0].plot(ts)
axes[0].set_title('Original')
axes[1].plot(trend)
axes[1].set_title('Trend')
axes[2].plot(seasonal)
axes[2].set_title('Seasonality')
axes[3].plot(residual)
axes[3].set_title('Residuals')
return trend, seasonal, residual, axes
return trend, seasonal, residual
def correlated_lags(ts, corr_lags=1, max_lags=100):
"""
Return the index of the correlated lags.
:param ts: time series
:param corr_lags: number of correlated lags to return. If -1, return all
:param max_lags: number of lags to calculate the acf function
"""
assert max_lags > corr_lags, "'max_lags' must be greater than 'corr_lags'"
acfs, conf = acf(ts, max_lags)
acfs = np.asarray(acfs)
idx = np.argsort(acfs)
most_corr = []
for i in idx[-2::-1]:
if acfs[i] > conf:
most_corr.append(i)
if len(most_corr) == corr_lags:
break
return sorted(most_corr)
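# --- Illustrative sketch (added for exposition; not part of the original
# module). The synthetic seasonal series below (period 12 plus noise) is an
# assumption made only for demonstration; lags at or near multiples of 12
# should dominate the returned indices.
def _correlated_lags_example():
    rng = np.random.RandomState(1)
    t = np.arange(600)
    ts = np.sin(2 * np.pi * t / 12.0) + 0.3 * rng.randn(len(t))
    return correlated_lags(ts, corr_lags=3, max_lags=50)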
| mit |
evgchz/scikit-learn | sklearn/cross_decomposition/cca_.py | 18 | 3129 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
ritviksahajpal/EPIC | SSURGO/SSURGO_to_csv.py | 1 | 12793 | ##################################################################
# SSURGO_to_csv.py Apr 2015
# ritvik sahajpal ([email protected])
#
##################################################################
import constants, logging, os, us, csv, pdb, glob
import numpy as np
import pandas as pd
def open_or_die(path_file, perm='r', header=None, sep=' ', delimiter=' ', usecols=[]):
"""
Open file or quit gracefully
:param path_file: Path of file to open
:return: Handle to file (netCDF), or dataframe (csv) or numpy array
"""
try:
if os.path.splitext(path_file)[1] == '.txt':
df = pd.read_csv(path_file, sep=sep, header=header, usecols=usecols)
return df
else:
logging.info('Invalid file type')
except:
logging.info('Error opening file '+path_file)
def component_aggregation(group):
# Sort by depth, makes it easier to process later
group.sort('hzdept_r',inplace=True)
# Determine number of soil layers
list_depths = np.append(group['hzdepb_r'],group['hzdept_r'])
num_layers = len(np.unique(list_depths))-1 # Exclude 0
if(num_layers <= 0):
logging.warn('Incorrect number of soil layers '+str(num_layers)+' '+str(group['cokey']))
return
return group
def read_ssurgo_tables(soil_dir):
# Read in SSURGO data
pd_mapunit = open_or_die(soil_dir+os.sep+constants.MAPUNIT+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.mapunit_vars.keys())
pd_component = open_or_die(soil_dir+os.sep+constants.COMPONENT+'.txt',sep=constants.SSURGO_SEP,header=None,usecols=constants.component_vars.keys())
pd_chorizon = open_or_die(soil_dir+os.sep+constants.CHORIZON+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.chorizon_vars.keys())
pd_muaggatt = open_or_die(soil_dir+os.sep+constants.MUAGGATT+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.muaggatt_vars.keys())
pd_chfrags = open_or_die(soil_dir+os.sep+constants.CHFRAGS+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.chfrags_vars.keys())
# if any of the dataframes are empty then return a error value
if ((pd_mapunit is None) or (pd_component is None) or (pd_chorizon is None) or (pd_muaggatt is None) or (pd_chfrags is None)):
raise ValueError('Empty dataframe from one of SSURGO files')
# Rename dataframe columns from integers to SSURGO specific names
pd_mapunit.rename(columns=constants.mapunit_vars ,inplace=True)
pd_component.rename(columns=constants.component_vars,inplace=True)
pd_chorizon.rename(columns=constants.chorizon_vars ,inplace=True)
pd_muaggatt.rename(columns=constants.muaggatt_vars ,inplace=True)
pd_chfrags.rename(columns=constants.chfrags_vars ,inplace=True)
# Sum up Fragvol_r in pd_chfrags
# See http://www.nrel.colostate.edu/wiki/nri/images/2/21/Workflow_NRI_SSURGO_2010.pdf
pd_chfrags = pd_chfrags.groupby('chkey').sum().reset_index(level=0)
# Aggregate pd_chorizon data based on cokey
chorizon_agg = pd_chorizon.groupby('cokey').apply(component_aggregation)
# Join chfrags and chorizon_agg data
chfrags_chor = chorizon_agg.merge(pd_chfrags,left_on='chkey',right_on='chkey')
# Join chfrags_chor data to the component table
ccomp = chfrags_chor.merge(pd_component,left_on='cokey',right_on='cokey')
# Join the chor_comp data to pd_muaggatt table
# Set how='outer' since we do not want to miss any mukey's
muag_ccomp = ccomp.merge(pd_muaggatt,left_on='mukey',right_on='mukey', how='outer')
# Join muag_ccomp to mapunit data
# Set how='outer' since we do not want to miss any mukey's
map_data = muag_ccomp.merge(pd_mapunit,left_on='mukey',right_on='mukey', how='outer')
return map_data
def SSURGO_to_csv():
sgo_data = pd.DataFrame()
for st in constants.list_st:
logging.info(st)
# For each state, process the SSURGO tabular files
for dir_name, subdir_list, file_list in os.walk(constants.data_dir):
if('_'+st+'_' in dir_name and constants.TABULAR in subdir_list):
logging.info(dir_name[-3:]) # County FIPS code
try:
tmp_df = read_ssurgo_tables(dir_name+os.sep+constants.TABULAR)
except ValueError:
logging.info('Empty dataframe from one of SSURGO files')
continue
tmp_df['state'] = st
tmp_df['county'] = dir_name[-3:]
tmp_df['FIPS'] = int(us.states.lookup(st).fips+dir_name[-3:])
sgo_data = pd.concat([tmp_df,sgo_data],ignore_index =True)
# Drop columns with all missing values
sgo_data.dropna(axis=1,how='all',inplace=True)
# Replace hydgrp values with integers
sgo_data.replace(constants.hydgrp_vars,inplace=True)
# If any null values exist, replace with mean of value in mukey
df3 = pd.DataFrame()
logging.info('If any null values exist, replace with mean of value in mukey')
if(np.any(sgo_data.isnull())):
df1 = sgo_data.set_index('mukey')
df2 = sgo_data.groupby('mukey').mean()
df3 = df1.combine_first(df2)
# If any null values remain, replace by county mean
logging.info('If any null values remain, replace by county mean')
if(np.any(df3.isnull())):
df1 = df3.reset_index().set_index('FIPS')
cnt_mean = sgo_data.groupby(['FIPS']).mean()
df3 = df1.combine_first(cnt_mean)
else:
pass
# If any null values remain, replace by state mean
logging.info('If any null values remain, replace by state mean')
if(np.any(df3.isnull())):
df1 = df3.reset_index().set_index('state')
st_mean = sgo_data.groupby(['state']).mean()
df3 = df1.combine_first(st_mean)
else:
pass
else:
pass
df3.reset_index(inplace=True)
# Convert niccdcd and hydgrp to integers
df3['hydgrp'] = df3['hydgrp'].astype(int)
df3['niccdcd'] = df3['niccdcd'].astype(int)
# Drop components with non zero initial depth
#logging.info('Drop faulty components')
#drop_df = df3.groupby('cokey').filter(lambda x: x['hzdept_r'].min() <= 0)
logging.info('Select the dominant component')
dom_df = df3.groupby('mukey').apply(lambda g: g[g['comppct_r']==g['comppct_r'].max()])
#drop_df.to_csv(constants.out_dir+'drop.csv')
out_ssurgo_dir = constants.r_soil_dir+os.sep+constants.SOIL+os.sep
constants.make_dir_if_missing(out_ssurgo_dir)
df3.to_csv(out_ssurgo_dir+os.sep+constants.all)
dom_df.to_csv(out_ssurgo_dir+os.sep+constants.dominant)
logging.info('Done!')
return dom_df
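# --- Minimal sketch (added for exposition; not part of the original script) of
# the gap-filling pattern used in SSURGO_to_csv() above: set_index() + groupby
# mean + combine_first() keeps observed values and falls back to the group
# mean. The toy frame is hypothetical and exists only to show the mechanics.
def _groupwise_fill_example():
    toy = pd.DataFrame({'mukey': [1, 1, 2, 2],
                        'om_r': [2.0, np.nan, np.nan, 4.0]})
    by_key = toy.set_index('mukey')
    key_means = toy.groupby('mukey').mean()
    return by_key.combine_first(key_means).reset_index()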
def write_epic_soil_file(group):
if(not(os.path.isfile(constants.t_soil_dir+str(int(group.mukey.iloc[0]))+'.sol'))):
epic_file = open(constants.t_soil_dir+str(int(group.mukey.iloc[0]))+'.sol', 'w')
num_layers = len(group.hzdepb_r)
# Line 1
epic_file.write(str(group.mukey.iloc[0])+' State: '+str(group.state.iloc[0])+' FIPS: '+str(group.FIPS.iloc[0])+'\n')
# Line 2
epic_file.write(('{:8.2f}'*10+'\n').format(group.albedodry_r.iloc[0],group.hydgrp.iloc[0],0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0))
# Line 3
epic_file.write(('{:8.2f}'*9+'\n').format(0.0,0.0,100.0,0.0,0.0,0.0,0.0,0.0,0.0))
# Soil characteristics per soil layer
epic_file.write(''.join(['{:8.2f}'.format(n*constants.CONV_DEPTH) for n in group.hzdepb_r])+'\n') # Depth to bottom of layer (m)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.dbthirdbar_r])+'\n') # Bulk Density (T/m^3)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.wfifteenbar_r])+'\n') # Soil water content at wilting point (1500 KPA), (m/m)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.wthirdbar_r])+'\n') # Water content at field capacity (33 KPA), (m/m)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.sandtotal_r])+'\n') # Sand content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.silttotal_r])+'\n') # Silt content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Initial Org N concentration (g/T) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.ph1to1h2o_r])+'\n') # Soil pH ()
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.sumbases_r])+'\n') # Sum of bases (cmol/kg)
epic_file.write(''.join(['{:8.2f}'.format(n*constants.OM_TO_WOC) for n in group.om_r])+'\n') # Organic matter content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.caco3_r])+'\n') # CaCO3 content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.cec7_r])+'\n') # Cation exchange capacity (cmol/kg)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.Fragvol_r])+'\n') # Coarse fragment content (% by vol)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Initial NO3 conc (g/T) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Initial Labile P (g/T) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Crop residue (T/ha) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.dbovendry_r])+'\n') # Oven dry Bulk Density (T/m^3)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n*constants.CONV_KSAT) for n in group.ksat_r])+'\n') # Saturated conductivity (mm/h)
for i in range(constants.ZERO_LINES):
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n')
# EPIC constant lines
epic_file.write('\n\n\n')
epic_file.write(' 275. 200. 150. 140. 130. 120. 110.\n')
epic_file.write(' 0.20 0.40 0.50 0.60 0.80 1.00 1.20\n')
epic_file.write(' .004 .006 .008 .009 .010 .010 .010\n')
epic_file.close()
else:
logging.info('File exists: '+constants.t_soil_dir+str(group.mukey.iloc[0])+'.sol')
def csv_to_EPIC(df):
try:
df.groupby('mukey').apply(write_epic_soil_file)
except Exception,e:
logging.info(str(e))
# Output ieSlList.dat
epic_SlList_file = open(constants.out_dir+os.sep+constants.SLLIST, 'w')
idx = 1
for filename in glob.iglob(os.path.join(constants.t_soil_dir, '*.sol')):
epic_SlList_file.write(('%5s "soils//%s"\n')%(idx,os.path.basename(filename)))
idx += 1
epic_SlList_file.close()
if __name__ == '__main__':
df = SSURGO_to_csv()
csv_to_EPIC(df)
#def uniq_vals(group):
# try:
# return group[group['cokey'] == mode(np.array(group.cokey))[0][0]]
# except Exception, e:
# logger.info(e)
#def wavg(val_col_name, wt_col_name):
# def inner(group):
# return (group[val_col_name] * group[wt_col_name]).sum() / group[wt_col_name].sum()
# inner.__name__ = val_col_name
# return inner
#def wt_mean(group):
# # custom function for calculating a weighted mean
# # values passed in should be vectors of equal length
# g = group.groupby('layer_id')
# for key,val in epic_soil_vars.iteritems():
# group[val] = group[val] / g[val].transform('sum') * group['compct_r']
# return group
#def average_mukey_soil_vars(group):
# return group.mean(numeric_only=True)
#df4 = pd.DataFrame()
#df5 = pd.DataFrame()
#logger.info('Compute weighted means')
#for key,val in epic_soil_vars.iteritems():
# print val
# df4[val] = df3.groupby(['mukey','layer_id']).apply(wavg(val, 'comppct_r'))
#cols = [col for col in df4.columns if col not in ['mukey', 'layer_id']]
#tmp_df4 = df4[cols]
#df3.reset_index(inplace=True)
#df4.reset_index(inplace=True)
#df5 = df3[df3.columns.difference(tmp_df4.columns)]
#df6 = df5.groupby('mukey').apply(uniq_vals)
#df7 = df4.merge(df6,on=['mukey','layer_id'])
#df3.to_csv(out_dir+'SSURGO3.csv')
#df4.to_csv(out_dir+'SSURGO4.csv')
#df5.to_csv(out_dir+'SSURGO5.csv')
#df6.to_csv(out_dir+'SSURGO6.csv')
#df7.to_csv(out_dir+'SSURGO7.csv')
#logger.info('Done!')
#pdb.set_trace()
#logger.info('Done!')
| mit |
pauldeng/nilmtk | nilmtk/disaggregate/maximum_likelihood_estimation.py | 5 | 27147 | import pandas as pd
import numpy as np
from disaggregator import Disaggregator
from matplotlib import pyplot as plt
from datetime import timedelta
from scipy.stats import poisson, norm
from sklearn import mixture
class MLE(Disaggregator):
"""
Disaggregation of a single appliance based on its features and
using the maximum likelihood of all features.
Attributes
----------
appliance: str
Name of the appliance
stats: list of dicts
One dict for feature with:
units: tuple
For instance: ('power','active')
resistive: boolean
To decide if 'apparent' == 'active'
thDelta: int
        Threshold for delta values on the power. Used in the train_on_chunk method
thLikelihood: int
        Threshold for the maximum likelihood
sample_period: str
For resampling in training and disaggregate methods
sample_method: str
Pandas method for resampling
onpower: dict
{'name':str, 'gmm': str, 'model': sklearn model}
offpower: dict
{'name':str, 'gmm': str, 'model': sklearn model}
duration: dict
{'name':str, 'gmm': str, 'model': sklearn model}
onpower_train: pandas.Dataframe()
Training samples of onpower
offpower_train: pandas.Dataframe()
Training samples of offpower
    duration_train: pandas.Dataframe()
Training samples of duration
powerNoise: int
For the disaggregate_chunk method, minimum delta value of a event to be
considered, otherwise is noise.
powerPair: int
For the disaggregate_chunk method, max delta value difference between
onpower and offpower
timeWindow: int
For the disaggregate_chunk method, a time frame to speed up
disaggregate_chunk method.
TODO:
-----
* Build a method for choosing thLikelihood automatically based on its
optimization using ROC curve.
* Method for measuring ROC curve.
"""
def __init__(self):
"""
        Initialise the model with default values
"""
super(MLE, self).__init__()
# Metadata
self.appliance = None
self.stats = []
self.units = None
self.resistive = False
self.thDelta = 0
self.thLikelihood = 0
self.sample_period = None
self.sampling_method = None
# FEATURES:
self.onpower = {'name': 'gmm', 'model': mixture.GMM(n_components=2)}
self.offpower = {'name': 'gmm', 'model': mixture.GMM(n_components=2)}
self.duration = {'name': 'poisson', 'model': poisson(0)}
# Trainings:
self.onpower_train = pd.DataFrame(columns=['onpower'])
self.offpower_train = pd.DataFrame(columns=['offpower'])
self.duration_train = pd.DataFrame(columns=['duration'])
# Constrains
self.powerNoise = 0 # Background noise in the main
self.powerPair = 0 # Max diff between onpower and offpower
self.timeWindow = 0 # To avoid high computation
def __retrain(self, feature, feature_train):
print "Training " + feature_train.columns[0]
if feature['name'] == 'gmm':
feature['model'].fit(feature_train)
elif feature['name'] == 'norm':
mu, std = norm.fit(feature_train)
feature['model'] = norm(loc=mu, scale=std)
elif feature['name'] == 'poisson':
            feature['model'] = poisson(feature_train.mean())  # fit a Poisson to the feature being retrained
else:
raise NameError(
"Name of the model for " +
str(feature_train.columns[0]) +
" unknown or not implemented")
def __physical_quantity(self, chunk):
if not self.resistive:
print "Checking units"
units_mismatched = True
for name in chunk.columns:
if name == self.units:
units = name
units_mismatched = False
if units_mismatched:
stringError = self.appliance + " cannot be disaggregated. " + self.appliance + \
" is a non-resistive element and units mismatches: disaggregated data is in " + \
str(self.units) + \
" and aggregated data is " + str(units)
raise ValueError(stringError)
else:
units = chunk.columns[0]
return units
def __pdf(self, feature, delta):
if feature['name'] == 'norm':
score = feature['model'].pdf(delta)
elif feature['name'] == 'gmm':
score = np.exp(feature['model'].score([delta]))[0]
elif feature['name'] == 'poisson':
# Decimal values produce odd values in poisson (bug)
delta = np.round(delta)
score = feature['model'].pmf(delta)
else:
raise AttributeError("Wrong model for" + feature['name'] +
" It must be: gmm, norm or poisson")
return score
def __pdf2(self, feature, delta):
if feature['name'] == 'norm':
score = feature['model'].pdf(delta)
elif feature['name'] == 'gmm':
score = np.exp(feature['model'].score([delta]))
elif feature['name'] == 'poisson':
# Decimal values produce odd values in poisson (bug)
delta = np.round(delta)
score = feature['model'].pmf(delta)
else:
raise AttributeError("Wrong model for" + feature['name'] +
" It must be: gmm, norm or poisson")
return score
def update(self, **kwargs):
"""
This method will update attributes of the model passed by kwargs.
Parameters
----------
kwargs : key word arguments
Notes
-----
"""
print "Updating model"
print kwargs
for key in kwargs:
setattr(self, key, kwargs[key])
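    # Hedged usage sketch (comments only; every value below is hypothetical and
    # shown purely to illustrate the call):
    #   model = MLE()
    #   model.update(appliance='kettle', units=('power', 'active'),
    #                thDelta=1000, thLikelihood=1e-6,
    #                sample_period='10S', sampling_method='mean',
    #                powerNoise=50, powerPair=100, timeWindow=600)
    # Any attribute initialised in __init__ can be overridden this way before
    # calling train().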
def train(self, metergroup):
"""
Train using ML.
Call disaggregate_chunk method
Parameters
----------
metergroup : a nilmtk.MeterGroup object
Notes
-----
* Inizialise "stats" and "feature_train" on the model.
* Instance is initialised to 1. Use meter.instance to provide more
information (TODO)
"""
        # Initialise stats and training data:
self.stats = []
self.onpower_train = pd.DataFrame(columns=['onpower'])
self.offpower_train = pd.DataFrame(columns=['offpower'])
self.duration_train = pd.DataFrame(columns=['duration'])
# Calling train_on_chunk by instance and identifier:
instance = 1 # initial instance.
for meter in metergroup.meters:
for chunk in meter.power_series():
identifier = (meter.appliances[0].type['type'], instance)
print identifier
if chunk.empty:
print "Chunk empty"
else:
print "Training on chunk"
self.train_on_chunk(pd.DataFrame(chunk.resample(
self.sample_period,
how=self.sampling_method)),
identifier
)
# self.train_on_chunk(pd.DataFrame(chunk),identifier)
instance += 1
def train_on_chunk(self, chunk, identifier):
"""
Extracts features from chunk, concatenates feature_train
(onpower_train, offpower_train and duration_train) with new features
and retrains feature
models.
Updates stats attribute.
Parameters
----------
        chunk : pd.DataFrame where each column represents a disaggregated
            appliance
        identifier : tuple of (nilmtk.appliance, int) representing the instance
            of that appliance for this chunk
Notes
-----
* Disaggregates only the selected appliance.(TODO: Disaggregates many)
"""
# EXTRACT FEATURES:
# find units:
self.__setattr__('units', chunk.columns[0])
        # Loading threshold for getting events:
thDelta = getattr(self, 'thDelta')
chunk.index.name = 'date_time'
# To prevent learning many samples at the middle of a edge:
chunk.ix[:, 0][chunk.ix[:, 0] < thDelta] = 0
# Learning edges
chunk['delta'] = chunk.ix[:, 0].diff()
chunk.delta.fillna(0, inplace=True)
edges = chunk[np.abs(chunk['delta']) > thDelta].delta
# Pairing on/off events
if len(edges) > 1:
offpower = edges[edges.apply(np.sign).diff() == -2]
onpower = edges[edges.apply(np.sign).diff(-1) == 2]
duration = offpower.reset_index().date_time - \
onpower.reset_index().date_time
duration = duration.astype('timedelta64[s]')
# Set consistent index for concatenation:
onpower = pd.DataFrame(onpower).reset_index(drop=True)
onpower.columns = ['onpower']
offpower = pd.DataFrame(offpower).reset_index(drop=True)
offpower.columns = ['offpower']
duration = pd.DataFrame(duration).reset_index(drop=True)
duration.columns = ['duration']
# Len of samples:
print "Samples of onpower: " + str(len(onpower))
print "Samples of offpower: " + str(len(offpower))
print "Samples of duration: " + str(len(duration))
number_of_events = len(onpower)
# Features (concatenation)
self.onpower_train = pd.concat(
[self.onpower_train, onpower]).reset_index(drop=True)
self.offpower_train = pd.concat(
[self.offpower_train, offpower]).reset_index(drop=True)
self.duration_train = pd.concat(
[self.duration_train, duration]).reset_index(drop=True)
else:
number_of_events = 0
print """WARNING: No paired events found on this chunk.
Is it thDelta too high?"""
# RE-TRAIN FEATURE MODELS:
self.__retrain(self.onpower, self.onpower_train)
self.__retrain(self.offpower, self.offpower_train)
self.__retrain(self.duration, self.duration_train)
# UPDATE STATS:
stat_dict = {'appliance': identifier[
0], 'instance': identifier[1], 'Nevents': number_of_events}
instanceFound = False
if len(self.stats) == 0:
self.stats.append(stat_dict)
else:
for stat in self.stats:
if ((stat['appliance'] == stat_dict['appliance']) and
(stat['instance'] == stat_dict['instance'])):
index = self.stats.index(stat)
self.stats[index]['Nevents'] = self.stats[
index]['Nevents'] + number_of_events
instanceFound = True
if not instanceFound:
self.stats.append(stat_dict)
def disaggregate(self, mains, output_datastore):
"""
Passes each chunk from mains generator to disaggregate_chunk()
and passes the output to _write_disaggregated_chunk_to_datastore()
Will have a default implementation in super class.
Can be overridden for more simple in-memory disaggregation,
or more complex out-of-core disaggregation.
Parameters
----------
mains : nilmtk.ElecMeter (single-phase) or nilmtk.MeterGroup (multi-phase)
output_datastore : instance of nilmtk.DataStore or str of datastore location
"""
dis_main = pd.DataFrame()
chunk_number = 0
for chunk in mains.power_series():
dis_chunk = self.disaggregate_chunk(
pd.DataFrame(chunk.resample(self.sample_period, how=self.sampling_method)))
dis_main = pd.concat([dis_main, dis_chunk])
chunk_number += 1
print str(chunk_number) + " chunks disaggregated"
# Saving output datastore:
output_datastore.append(key=mains.key, value=dis_main)
def disaggregate_chunk(self, chunk):
"""
Checks units.
Disaggregates "chunk" with MaximumLikelihood algorithm.
Optimization:
Filters events with powerNoise.
Filters paired-events with powerPair.
Windowing with timeWindow for speeding up.
Parameters
----------
chunk : pd.DataFrame (in NILMTK format)
Returns
-------
chunk : pd.DataFrame where each column represents a disaggregated appliance
Notes
-----
        * Disaggregation is not proven. (TODO: verify the process with the ground truth)
* Disaggregates only the selected appliance.(TODO: Disaggregates many)
"""
# An resistive element has active power equal to apparent power.
# Checking power units.
        units = self.__physical_quantity(chunk)  # the chunk's columns carry the power units
# EVENTS OUT OF THE CHUNK:
# Delta values:
column_name = 'diff_' + units[1]
chunk[column_name] = chunk.loc[:, units].diff()
# Filter the noise.
chunk['onpower'] = (chunk[column_name] > self.powerNoise)
chunk['offpower'] = (chunk[column_name] < -self.powerNoise)
events = chunk[(chunk.onpower == True) | (chunk.offpower == True)]
detection_list = []
singleOnevent = 0
# Max Likelihood algorithm (optimized):
for onevent in events[events.onpower == True].iterrows():
# onTime = onevent[0]
# deltaOn = onevent[1][1]
# windowning:
offevents = events[(events.offpower == True) & (events.index > onevent[0]) & (
events.index < onevent[0] + timedelta(seconds=self.timeWindow))]
# Filter paired events:
offevents = offevents[
abs(onevent[1][1] - offevents[column_name].abs()) < self.powerPair]
# Max likelihood computation:
if not offevents.empty:
# pon = self.__pdf(self.onpower, onevent[1][1])
for offevent in offevents.iterrows():
# offTime = offevent[0]
# deltaOff = offevent[1][1]
# poff = self.__pdf(self.offpower, offevent[1][1])
# duration = offevent[0] - onTime
# pduration = self.__pdf(self.duration, (offevent[0] - onTime).total_seconds())
likelihood = self.__pdf(self.onpower, onevent[1][1]) * \
self.__pdf(self.offpower, offevent[1][1]) * \
self.__pdf(self.duration, (offevent[0] - \
onevent[0]).total_seconds())
detection_list.append(
{'likelihood': likelihood, 'onTime': onevent[0],
'offTime': offevent[0], 'deltaOn': onevent[1][1]})
else:
singleOnevent += 1
# Passing detections to a pandas.DataFrame
detections = pd.DataFrame(
columns=('onTime', 'offTime', 'likelihood', 'deltaOn'))
for i in range(len(detection_list)):
detections.loc[i] = [detection_list[i]['onTime'], detection_list[i][
'offTime'], detection_list[i]['likelihood'], detection_list[i]['deltaOn']]
detections = detections[detections.likelihood >= self.thLikelihood]
# Constructing dis_chunk (power of disaggregated appliance)
dis_chunk = pd.DataFrame(
index=chunk.index, columns=[str(units[0]) + '_' + str(units[1])])
dis_chunk.fillna(0, inplace=True)
        # Rule out overlapping detections, ordering by likelihood value.
detections = detections.sort('likelihood', ascending=False)
for row in detections.iterrows():
# onTime = row[1][0] offTime = row[1][1] deltaOn = row[1][3]
            if ((dis_chunk[(dis_chunk.index >= row[1][0]) &
                           (dis_chunk.index < row[1][1])].sum().values[0]) == 0):
# delta = chunk[chunk.index == onTime][column_name].values[0]
dis_chunk[(dis_chunk.index >= row[1][0]) & (
dis_chunk.index < row[1][1])] = row[1][3]
# Stat information:
print str(len(events)) + " events found."
print str(len(events[events.onpower == True])) + " onEvents found"
print str(singleOnevent) + " onEvents no paired."
return dis_chunk
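    # A commented numerical sketch of the per-pair likelihood used above (the
    # delta and duration values are invented for illustration; only the
    # structure mirrors disaggregate_chunk):
    #
    #   delta_on = 1500.0     # Watts, rising edge that survived powerNoise
    #   delta_off = -1480.0   # Watts, falling edge paired within powerPair
    #   duration = 125.0      # seconds between both events (< timeWindow)
    #   likelihood = (self.__pdf(self.onpower, delta_on) *
    #                 self.__pdf(self.offpower, delta_off) *
    #                 self.__pdf(self.duration, duration))
    #
    # A pair is kept only if likelihood >= self.thLikelihood; overlapping
    # detections are then resolved in favour of the higher likelihood.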
def no_overfitting(self):
"""
        Crops feature_train (onpower_train, offpower_train and duration_train)
        so that each appliance instance (of the same model) contributes the same
        number of samples, avoiding overfitting to an appliance with many samples.
Updates stats attribute.
Does the retraining.
"""
        # The instance with the minimum (non-zero) number of events sets the
        # maximum number of samples kept per instance
train_len = []
[train_len.append(st['Nevents']) for st in self.stats]
train_len = np.array(train_len)
max_len = train_len[train_len != 0].min()
# CROPS FEATURE SAMPLES
onpower_train = pd.DataFrame()
offpower_train = pd.DataFrame()
duration_train = pd.DataFrame()
start = 0
end = 0
for ind in np.arange(len(self.stats)):
if self.stats[ind]['Nevents'] != 0:
if ind == 0:
start = 0
else:
start = end
end += self.stats[ind]['Nevents']
aux = self.onpower_train[start:end]
aux = aux[:max_len]
onpower_train = pd.concat([onpower_train, aux])
aux = self.offpower_train[start:end]
aux = aux[:max_len]
offpower_train = pd.concat([offpower_train, aux])
aux = self.duration_train[start:end]
aux = aux[:max_len]
duration_train = pd.concat([duration_train, aux])
                # updating stats:
self.stats[ind]['Nevents'] = max_len
self.onpower_train = onpower_train
self.offpower_train = offpower_train
self.duration_train = duration_train
# RE-TRAINS FEATURES:
self.__retrain(self.onpower, self.onpower_train)
self.__retrain(self.offpower, self.offpower_train)
self.__retrain(self.duration, self.duration_train)
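    # Commented sketch of the cropping above (the event counts are invented):
    # with self.stats = [{'Nevents': 120}, {'Nevents': 45}, {'Nevents': 80}],
    # max_len becomes 45, so only the first 45 onpower/offpower/duration
    # samples of each instance are kept before the models are re-trained.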
def check_cdfIntegrity(self, step):
"""
        Checks the integrity of the feature model distributions.
CDF has to be bounded by one.
Parameters
----------
step: resolution step size on the x-axis for pdf and cdf functions.
"""
# Selecting bins automatically:
x_max = self.onpower_train.max().values[0]
x_min = 0
step = 1
x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)
x_max = 0
x_min = self.offpower_train.min().values[0]
step = 1
x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)
x_max = self.duration_train.max().values[0]
x_min = 0
step = 1
x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)
# Evaluating score for:
# Onpower
y_onpower = self.__pdf2(self.onpower, x_onpower)
print "Onpower cdf: " + str(y_onpower.sum())
# Offpower
y_offpower = self.__pdf2(self.offpower, x_offpower)
print "Offpower cdf: " + str(y_offpower.sum())
# duration
y_duration = self.__pdf2(self.duration, x_duration)
print "Duration cdf: " + str(y_duration.sum())
# Plots:
# fig1 = plt.figure()
# ax1 = fig1.add_subplot(311)
# ax2 = fig1.add_subplot(312)
# ax3 = fig1.add_subplot(313)
# ax1.plot(x_onpower, y_onpower)
# ax1.set_title("PDF CDF: Onpower")
# ax1.set_ylabel("density")
# ax1.set_xlabel("Watts")
# ax2.plot(x_offpower, y_offpower)
# ax2.set_title(" PDF CDF: Offpower")
        # ax2.set_ylabel("density")
# ax2.set_xlabel("Watts")
# ax3.plot(x_duration, y_duration)
# ax3.set_title("PDF CDF: Duration")
# ax3.set_ylabel("density")
# ax3.set_xlabel("Seconds")
def featuresHist(self, **kwargs):
"""
Visualization tool to check if feature model distributions fit
to samples for feature training (onpower_train, offpower_train
and duration_train)
Parameters
----------
kwargs : keyword arguments list with bins_onpower, bins_offpower and bin_duration.
bins_feature: numpy.arange for plotting the hist with specified bin sizes.
"""
# Selecting bins automatically:
bins_onpower = np.arange(self.onpower_train.min().values[0],
self.onpower_train.max().values[0],
(self.onpower_train.max().values[0] -
self.onpower_train.min().values[0]) / 50)
bins_offpower = np.arange(self.offpower_train.min().values[0],
self.offpower_train.max().values[0],
(self.offpower_train.max().values[0] -
self.offpower_train.min().values[0]) / 50)
bins_duration = np.arange(self.duration_train.min().values[0],
self.duration_train.max().values[0],
(self.duration_train.max().values[0] -
self.duration_train.min().values[0]) / 50)
# If a bin has been specified update the bin sizes.
for key in kwargs:
if key == 'bins_onpower':
bins_onpower = kwargs[key]
elif key == 'bins_offpower':
bins_offpower = kwargs[key]
elif key == 'bins_duration':
bins_duration = kwargs[key]
else:
print "Non valid kwarg"
# Plot structure:
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
# Evaluating score for:
# Onpower
x = np.arange(bins_onpower.min(), bins_onpower.max() + \
np.diff(bins_onpower)[0], np.diff(bins_onpower)[0] / float(1000)).reshape(-1, 1)
y = self.__pdf2(self.onpower, x)
norm = pd.cut(
self.onpower_train.onpower, bins=bins_onpower).value_counts().max() / max(y)
# Plots for Onpower
ax1.hist(
self.onpower_train.onpower.values, bins=bins_onpower, alpha=0.5)
ax1.plot(x, y * norm)
ax1.set_title("Feature: Onpower")
ax1.set_ylabel("Counts")
ax1.set_xlabel("Watts")
# Offpower
x = np.arange(bins_offpower.min(), bins_offpower.max() + \
np.diff(bins_offpower)[0], np.diff(bins_offpower)[0] / float(1000)).reshape(-1, 1)
y = self.__pdf2(self.offpower, x)
norm = pd.cut(self.offpower_train.offpower,
bins=bins_offpower).value_counts().max() / max(y)
# Plots for Offpower
ax2.hist(self.offpower_train.offpower.values,
bins=bins_offpower, alpha=0.5)
ax2.plot(x, y * norm)
ax2.set_title("Feature: Offpower")
ax2.set_ylabel("Counts")
ax2.set_xlabel("Watts")
# Duration
x = np.arange(bins_duration.min(), bins_duration.max() + \
np.diff(bins_duration)[0], np.diff(bins_duration)[0] / float(1000)).reshape(-1, 1)
y = self.__pdf2(self.duration, x)
norm = pd.cut(self.duration_train.duration,
bins=bins_duration).value_counts().max() / max(y)
# Plots for duration
ax3.hist(self.duration_train.duration.values,
bins=bins_duration, alpha=0.5)
ax3.plot(x, y * norm)
ax3.set_title("Feature: Duration")
ax3.set_ylabel("Counts")
ax3.set_xlabel("Seconds")
def featuresHist_colors(self, **kwargs):
"""
Visualization tool to check if samples for feature training
(onpower_train, offpower_train and duration_train) are equal
for each appliance (same model appliance).
Each appliance represented by a different color.
Parameters
----------
kwargs : keyword arguments list with bins_onpower, bins_offpower and bin_duration.
bins_feature: numpy.arange for plotting the hist with specified bin sizes.
"""
# Selecting bins automatically:
bins_onpower = np.arange(self.onpower_train.min().values[0],
self.onpower_train.max().values[0],
(self.onpower_train.max().values[0] -
self.onpower_train.min().values[0]) / 50)
bins_offpower = np.arange(self.offpower_train.min().values[0],
self.offpower_train.max().values[0],
(self.offpower_train.max().values[0] -
self.offpower_train.min().values[0]) / 50)
bins_duration = np.arange(self.duration_train.min().values[0],
self.duration_train.max().values[0],
(self.duration_train.max().values[0] -
self.duration_train.min().values[0]) / 50)
        # If bins have been specified, update the bin sizes.
for key in kwargs:
if key == 'bins_onpower':
bins_onpower = kwargs[key]
elif key == 'bins_offpower':
bins_offpower = kwargs[key]
elif key == 'bins_duration':
bins_duration = kwargs[key]
else:
print "Non valid kwarg"
# Plot:
fig1 = plt.figure()
ax1 = fig1.add_subplot(311)
ax2 = fig1.add_subplot(312)
ax3 = fig1.add_subplot(313)
start = 0
end = 0
for ind in np.arange(len(self.stats)):
if self.stats[ind]['Nevents'] != 0:
if ind == 0:
start = 0
else:
start = end
end += self.stats[ind]['Nevents']
ax1.hist(
self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)
ax2.hist(
self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)
ax3.hist(
self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)
ax1.set_title("Feature: Onpower")
ax1.set_xlabel("Watts")
ax1.set_ylabel("Counts")
ax2.set_title("Feature: Offpower")
ax2.set_xlabel("Watts")
ax2.set_ylabel("Counts")
ax3.set_title("Feature: Duration")
ax3.set_xlabel("Seconds")
ax3.set_ylabel("Counts")
| apache-2.0 |
Chandra-MARX/marx-test | tests/positions.py | 1 | 21371 | '''
In every |marx| simulation, one or more sources are placed at some sky position.
|marx| simulates photons coming from that position, traces them through the
mirror and gratings and finally places them on the chip. With a known
aspect solution, chip coordinates can then be transformed back to sky
coordinates. In general, this will not recover the exact sky position where a
photon started out. A big part of that is scatter in the mirrors, which blurs
the image (see :ref:`sect-tests.PSF` for tests of the PSF).
However, with a large number of photons, we can fit the average position which
should be close to the real sky position.
In real observations, other factors contribute, such as the finite
resolution of the detectors (|marx| usually takes that into account, but it can
be switched off through the ``--pixadj="EXACT"`` switch in :marxtool:`marx2fits`)
and the uncertainty of the aspect solution.
Within a single observation, positions will be less certain for fainter sources
(due to Poisson statistics) and for sources at larger off-axis angles (due to the
larger PSF).
'''
import shutil
import subprocess
import os
from collections import OrderedDict
from marxtest import base
from marxtest.process_utils import marxpars_from_asol
title = 'Coordinates on the sky and the chip'
tests = ['ONC', 'RegularGrid', 'RegularGridHRCI']
class ONC(base.MarxTest):
'''The `Orion Nebula Cluster (ONC) <http://simbad.u-strasbg.fr/simbad/sim-basic?Ident=onc>`_
is a dense star forming region with about 1600 X-ray sources observed
in the COUP survey by
`Getman et al (2005) <http://adsabs.harvard.edu/abs/2005ApJS..160..319G>`_ .
We simulate this field with |marx| and then run a source detection to check
how well we recover the input coordinates. This will depend on the number
of counts detected and the position in the field.
To simplify the simulation input, we assume that all sources have flat
lightcurves and are
monoenergetic at the observed mean energy (the energy matters because
the effective area is energy dependent and so is the PSF).
    We write a short C code that reads an input coordinate list and generates
the photons in this manner. We compile
the code, and call it as a :ref:`sect-usersource`.
'''
title = 'Chandra Orion Ultradeep project'
obsid = 3744
figures = OrderedDict([('ds9', {'alternative': '',
                                    'caption': '`ds9`_ image of the observed data (left) and simulation (right). The sources detected in the simulation are overlaid. There are a few cases where the read-out streak is identified as a source or where two close sources are detected as one larger resolved source. The COUP catalog used as input is based on much longer merged observations and has been checked against optical and IR observations to remove such spurious detections.'}),
('dist', {'alternative': 'Scatter plot with distance from aimpoint vs coordinate error in the fit.',
                                     'caption': 'Apart from a few outliers close to the aimpoint (mostly confused sources, see above), the distribution of coordinate errors spreads out with increasing distance, i.e. with the size of the PSF.'})
])
    summary='For this field, we know the true input coordinates so we can check how well |marx| reproduces those. In the center of the field (about one arcmin) the coordinate error is less than the size of an ACIS pixel for all sources and the average error never grows much beyond 1 ACIS pixel even for far off-axis sources. The upper envelope of the distribution of errors is approximately linear and reaches 1 arcsec at a distance of 200 arcsec. No strong correlation of coordinate error and count rate of the source is apparent, indicating that the dominant error is not just due to Poisson counting statistics.'
@base.Python
def step_2(self):
'''Make input coordinate table
Coordinates are relative to pointing direction in arcmin'''
import os
from astropy.table import Table
from astropy.io import fits
asolfile = self.get_data_file('asol')
asol = fits.getheader(asolfile, 1)
coup = Table.read(os.path.join(self.pkg_data, 'COUP.tsv'),
format='ascii.fast_tab')
tab = Table()
tab['RA'] = (coup['RAJ2000'] - asol['RA_NOM']) * 60
tab['DEC'] = (coup['DEJ2000'] - asol['DEC_NOM']) * 60
tab['weight'] = 10**(coup['Lt'] - 27)
tab['energy'] = coup['<E>']
tab.write('coup.marxin', format='ascii.no_header', overwrite=True)
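        # The resulting coup.marxin file is a whitespace-separated table that
        # the C user source below reads line by line; the values shown here
        # are invented and only illustrate the expected column order:
        #
        #   RA[arcmin]  Dec[arcmin]  weight  energy[keV]
        #   -1.234      0.567        2.5     1.7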
@base.CCode
def step_5(self):
'''C code for a grid of sources.
(``user.h`` and ``jdmath.h`` are shipped with |marx|.)'''
ccode=r'''
#include <stdio.h>
#include <stdlib.h>
#include <jdmath.h>
#include "user.h"
/* This user source implements many point sources via a file that
* specifies the source positions and energies. The current implementation
* assumes the format:
* RA Dec weight energy
 * Here RA, Dec specify the source position, weight specifies the strength
* of the source in relation to the others.
*/
typedef struct
{
double cosx, cosy, cosz;
double weight;
double energy;
}
Point_Source_Type;
static unsigned int Num_Points;
static Point_Source_Type *Point_Sources;
static unsigned int Max_Num_Points;
static char *do_realloc (char *p, unsigned int len)
{
if (p == NULL)
p = malloc (len);
else
p = realloc (p, len);
if (p == NULL)
fprintf (stderr, "Not enough memory\n");
return p;
}
static void free_sources (void)
{
if (Point_Sources == NULL)
return;
free ((char *) Point_Sources);
Point_Sources = NULL;
}
static int add_source (double ra, double dec, double weight, double energy)
{
Point_Source_Type *p;
double cosx, cosy, cosz;
/* Convert to God's units from arc-min */
ra = ra * (PI/(180.0 * 60.0));
dec = dec * (PI/(180.0 * 60.0));
if (Max_Num_Points == Num_Points)
{
Max_Num_Points += 32;
p = (Point_Source_Type *)do_realloc ((char *)Point_Sources, Max_Num_Points * sizeof (Point_Source_Type));
if (p == NULL)
{
free_sources ();
return -1;
}
Point_Sources = p;
}
p = Point_Sources + Num_Points;
   /* Note that the minus sign is to generate a vector pointing from the
* source to the origin
*/
p->cosx = -cos (dec) * cos (ra);
p->cosy = -cos (dec) * sin(ra);
p->cosz = -sin (dec);
p->weight = weight;
p->energy = energy;
Num_Points += 1;
return 0;
}
static void normalize_sources (void)
{
double total;
unsigned int i;
total = 0;
for (i = 0; i < Num_Points; i++)
{
Point_Sources[i].weight += total;
total = Point_Sources[i].weight;
}
for (i = 0; i < Num_Points; i++)
Point_Sources[i].weight /= total;
/* Make sure no round-off error affects the weight of the last point */
Point_Sources[Num_Points - 1].weight = 1.0;
}
int user_open_source (char **argv, int argc, double area,
double cosx, double cosy, double cosz)
{
FILE *fp;
char line[1024];
char *file;
unsigned int linenum;
file = argv[0];
if (file == NULL)
{
fprintf (stderr, "UserSource Model requires FILE as argument\n");
return -1;
}
fp = fopen (file, "r");
if (fp == NULL)
{
fprintf (stderr, "Unable to open %s\n", file);
return -1;
}
linenum = 0;
while (NULL != fgets (line, sizeof (line), fp))
{
double ra, dec, weight, energy;
linenum++;
if (4 != sscanf (line, "%lf %lf %lf %lf", &ra, &dec, &weight, &energy))
continue;
if (weight <= 0.0)
{
fprintf (stderr, "weight on line %d of %s must be positive\n",
linenum, file);
free_sources ();
return -1;
}
if (-1 == add_source (ra, dec, weight, energy))
{
fclose (fp);
return -1;
}
}
fclose (fp);
if (Num_Points == 0)
{
fprintf (stderr, "%s contains no sources\n", file);
return -1;
}
normalize_sources ();
return 0;
}
void user_close_source (void)
{
free_sources ();
}
int user_create_ray (double *delta_t, double *energy,
double *cosx, double *cosy, double *cosz)
{
double r;
Point_Source_Type *p;
p = Point_Sources;
r = JDMrandom ();
while (r > p->weight)
p++;
*delta_t = -1.0;
*energy = p->energy;
*cosx = p->cosx;
*cosy = p->cosy;
*cosz = p->cosz;
return 0;
}
int main (int a, char **b)
{
(void) a;
(void) b;
return 1;
}
'''
return 'pnts.c', ccode
@base.Python
def step_6(self):
'''compile USER code
|marx| ships with a few examples of user sources. We pick one
        of them, copy it to the right directory, and compile it with gcc.
'''
marxpath = self.conf.get('marx', 'path')
src = os.path.join(marxpath,
'share', 'doc', 'marx', 'examples', 'user-source')
shutil.copy(os.path.join(src, 'user.h'),
os.path.join(self.basepath, 'user.h'))
jdmath_h = os.path.join(marxpath, 'include')
jdmath_a = os.path.join(marxpath, 'lib', 'libjdmath.a')
subprocess.call(['gcc',
'-shared', 'pnts.c', '-o', 'pnts.so', '-fPIC',
'-I' + jdmath_h, jdmath_a])
@base.Shell
def step_7(self):
'''Unzip fits file.
MARX cannot read zipped fits files, so we need to unzip the .fits.gz asol
files that we downloaded from the archive. On the other hand, `CIAO`_
tools work on both zipped or unzipped files, so there is no need to
unzip all of them, just the files that MARX reads as input.
'''
asol = self.get_data_file('asol')
return [f'gunzip -f {asol}']
@base.Marx
def step_8(self):
'''run marx USER source matching observation'''
asol = self.get_data_file('asol')
evt = self.get_data_file('evt2')
pars = marxpars_from_asol(self.conf, asol, evt)
pars['OutputDir'] = 'COUP'
pars['SourceType'] = 'USER'
pars['UserSourceFile'] = os.path.join(self.basepath, 'pnts.so')
pars['UserSourceArgs'] = os.path.join(self.basepath, 'coup.marxin')
return pars
@base.Marx2fits
def step_9(self):
'turn into fits file'
return '--pixadj=EDSER', 'COUP', 'COUP.fits'
@base.Ciao
def step_10(self):
'''ds9 image of the PSF
In the observation, the brightest sources are piled-up. We don't bother
simulating this here, so we just set the scaling limits to bring out
the fainter details and ignore the bright peaks.
'''
return ['''ds9 -log -cmap heat {0} COUP.fits -scale limits 0 2000 -frame 1 -regions command 'text 5:35:15 -5:22:09 # text=Observation font="helvetica 24"' -frame 2 -regions command 'text 5:35:15 -5:22:09 # text=MARX font="helvetica 24"' -region load src.fits -saveimage {1} -exit'''.format(self.get_data_file("evt2"), self.figpath(list(self.figures.keys())[0]))]
@base.Ciao
def step_11(self):
'''Source detection'''
out = ['dmcopy "COUP.fits[EVENTS][bin x=2500:5500:2,y=2500:5500:2]" im.fits option=image clobber=yes',
'mkpsfmap im.fits psf.map 1.4 ecf=0.5',
'celldetect im.fits src.fits psffile=psf.map clobber=yes'
]
return out
@base.Python
def step_15(self):
'''Check position of detected sources'''
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.io import fits
src = Table.read('src.fits')
srcin = Table.read(os.path.join(self.pkg_data, 'COUP.tsv'),
format='ascii.fast_tab')
src_co = SkyCoord(src['RA'], src['DEC'], unit='deg')
srcin_co = SkyCoord(srcin['RAJ2000'], srcin['DEJ2000'], unit='deg')
idx, d2d, d3d = src_co.match_to_catalog_sky(srcin_co)
asolfile = self.get_data_file('asol')
asol = fits.getheader(asolfile, 1)
cen = SkyCoord(asol['RA_NOM'], asol['DEC_NOM'], unit='deg')
d = cen.separation(src_co).arcsec
fig = plt.figure()
ax1 = plt.subplot(111)
scat1 = ax1.scatter(d, d2d.arcsec, c=np.log10(src['NET_COUNTS']), lw=1)
ax1.set_xlabel('distance from aimpoint [arcsec]')
ax1.set_ylabel('coordinate error [arcsec]')
ax1.set_xlim([0, 350])
ax1.set_ylim([0, 2])
cbar1 = fig.colorbar(scat1, ax=ax1)
cbar1.set_label('log(net counts per source)')
fig.savefig(self.figpath(list(self.figures.keys())[1]))
class RegularGrid(base.MarxTest):
'''In this example we place a radial grid of sources on the sky. Each source
emits an equal number of photons (exactly, no Poisson statistics) so that
we can compare the accuracy of the position we recover. Note that the
*detected* number of photons will be smaller for off-axis photons because
of vignetting!
We write a short C code that generates the photons in this manner, compile
    it, and call it as a :ref:`sect-usersource`.
'''
DetectorType = 'ACIS-I'
title = 'Regular Grid (ACIS)'
figures = OrderedDict([('ds9', {'alternative': 'Sources positioned like knots in a spider web.',
'caption': '`ds9`_ image of the simulation. The size of the PSF increases further away from the aimpoint.'}),
('hist', {'alternative': 'Plot is described in the caption.',
                                     'caption': '*left*: The error in the position (measured radially to the optical axis) increases with the distance to the optical axis. One part of this is just that the effective area and thus the number of counts decreases. There is also a systematic trend where sources at larger off-axis angle are systematically fitted too close to the center. Further investigation is necessary to check if this is a problem related to |marx| or :ciao:`celldetect`. In any case, the typical offset is below 0.2 arcsec, which is less than half a pixel in ACIS. *right*: Difference in position angle between input and fit. (Outliers beyond the plot range are not shown.)'})
])
summary = 'The input position is typically recovered to much better than one pixel for sources with a few hundred counts. There is a small systematic trend that needs to be studied further.'
@base.CCode
def step_5(self):
'''C code for a grid of sources.
(``user.h`` is shipped with |marx|.)'''
ccode='''
#include <stdio.h>
#include <math.h>
#include "user.h"
static double Source_CosX;
static double Source_CosY;
static double Source_CosZ;
int user_open_source (char **argv, int argc, double area,
double cosx, double cosy, double cosz)
{
return 0;
}
void user_close_source (void)
{
}
static double To_Radians = (M_PI / 180.0 / 3600.0);
#define ARC_SECONDS_PER_CELL 50
#define ANGULAR_STEPS 16
int user_create_ray (double *delta_t, double *energy,
double *cosx, double *cosy, double *cosz)
{
static int last_i = 0;
static int last_j = 0;
double theta, phi;
double cos_theta, sin_theta;
if (last_j == ANGULAR_STEPS){
last_j = 0;
last_i++;
}
if (last_i == 20) last_i = 0;
theta = To_Radians * last_i * ARC_SECONDS_PER_CELL;
phi = (10. /180 * M_PI) + last_j * 2 * M_PI / ANGULAR_STEPS;
sin_theta = sin(theta);
*cosx = -cos (theta);
*cosy = sin_theta * cos (phi);
*cosz = sin_theta * sin (phi);
*delta_t = -1.0;
*energy = -1.0;
if (last_i ==0){
last_i++;
}
else {
last_j++;
}
return 0;
}
int main (int a, char **b)
{
(void) a;
(void) b;
return 1;
}'''
return 'radialgrid.c', ccode
@base.Python
def step_6(self):
'''compile USER code'''
marxpath = self.conf.get('marx', 'path')
src = os.path.join(marxpath, 'share', 'doc', 'marx', 'examples',
'user-source', 'user.h')
shutil.copy(os.path.join(src),
os.path.join(self.basepath, 'user.h'))
subprocess.call(['gcc', '-lm', '-fPIC',
'-shared', 'radialgrid.c', '-o', 'radialgrid.so'])
@base.Marx
def step_7(self):
'''run USER source'''
return {'SourceType': 'USER', 'OutputDir': 'points',
'GratingType': 'NONE',
'SourceRA': 90., 'SourceDEC': 0.,
'RA_Nom': 90., 'Dec_Nom': 0, 'Roll_Nom': 0,
'DetectorType': self.DetectorType,
'UserSourceFile': os.path.join(self.basepath, 'radialgrid.so'),
'NumRays': -100000, 'ExposureTime': 0}
@base.Marx2fits
def step_8(self):
'turn into fits file'
return '--pixadj=EDSER', 'points', 'points.fits'
@base.Ciao
def step_10(self):
'''ds9 image of the PSF'''
return ['''ds9 -width 500 -height 500 -log -cmap heat points.fits -pan to 4097 4097 physical -zoom 0.5 -bin factor 2 -grid -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
@base.Ciao
def step_11(self):
'''Source detection'''
out = ['dmcopy "points.fits[EVENTS][bin x=3000:5100:2,y=3000:5100:2]" im.fits option=image clobber=yes',
'mkpsfmap im.fits psf.map 1.4 ecf=0.5 clobber=yes',
'celldetect im.fits src.fits psffile=psf.map clobber=yes'
]
return out
@base.Python
def step_15(self):
'''Check position of detected sources'''
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.coordinates import SkyCoord
src = Table.read('src.fits')
# Find distance from input position.
src['RA_INPUT'] = src['RA'] - (src['RA'] // (360./16.)) * (360./16.) - 10.
# Problem: Might expect source at 1.0,
# but measure at 0.99. In this case distance to next lower source
# is 0.99. Thus shift input by 0.005 (about 50 arcsec / 2)
        # before integer division
src['DEC_INPUT'] = src['DEC'] - ((0.005 + src['DEC']) // (50./3600.)) * (50./3600.)
cen = SkyCoord(90., 0, unit='deg')
det = SkyCoord(src['RA'], src['DEC'], unit='deg')
d = cen.separation(det).arcsec
d_err = np.mod(d + 10, 50.) - 10
ang = cen.position_angle(det).degree
# Subtract offset that we placed in the C code to avoid 0./360. ambiguity
# Step width is 360./16 = 22.5 deg
# Offset is 10 deg. Complement we find here is 12.5 deg.
ang = ang - 12.5
ang_err = np.mod(ang + 2, 360. / 16.) - 2
ind = d > 10
fig = plt.figure(figsize=(8, 4))
ax1 = plt.subplot(121)
scat1 = ax1.scatter(d, d_err, c=src['NET_COUNTS'], lw=1)
ax1.set_xlabel('distance [arcsec]')
ax1.set_ylabel('distance error [arcsec]')
ax1.set_xlim([-10, 620])
ax1.set_ylim([-1, 0.5])
ax2 = plt.subplot(122)
scat2 = ax2.scatter(ang, ang_err, c=src['NET_COUNTS'], lw=1)
ax2.set_xlabel('pos ang [deg]')
ax2.set_ylabel('pos ang error [deg]')
ax2.set_xlim([-5, 350])
ax2.set_ylim([-0.3, 0.3])
cbar2 = fig.colorbar(scat2, ax=ax2)
cbar2.set_label('net counts per source')
fig.savefig(self.figpath(list(self.figures.keys())[1]))
class RegularGridHRCI(RegularGrid):
'''Same as above, but with HRC-I as a detector.
    The field-of-view of the HRC-I is larger than for ACIS-I, but the PSF becomes
very large at large off-axis angles and thus the positional uncertainty
will be so large that a comparison to |marx| is no longer helpful to test
the accuracy of the |marx| simulations.
'''
figures = OrderedDict([('ds9', {'alternative': 'Sources positioned like knots in a spider web. The image is very similar to the previous ACIS example.',
'caption': '`ds9`_ image of the simulation. The size of the PSF increases further away from the aimpoint.'}),
('hist', {'alternative': 'Plot is described in the caption.',
'caption': 'See previous example. The same trends are visible with a slightly larger scatter.'})
])
summary = 'In the central few arcmin the input position is typically recovered to better than 0.2 pixels for sources with a few hundred counts.'
DetectorType = 'HRC-I'
title = 'Regular grid (HRC)'
@base.Ciao
def step_10(self):
'''ds9 image of the PSF'''
return ['''ds9 -width 500 -height 500 -log -cmap heat points.fits -pan to 16392 16392 physical -bin factor 16 -grid -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
@base.Ciao
def step_11(self):
'''Source detection'''
out = ['dmcopy "points.fits[EVENTS][bin x=8500:24500:8,y=8500:24500:8]" im.fits option=image clobber=yes',
'mkpsfmap im.fits psf.map 1.4 ecf=0.5 clobber=yes',
'celldetect im.fits src.fits psffile=psf.map clobber=yes'
]
return out
| gpl-2.0 |
jcrudy/glm-sklearn | glmsklearn/glm.py | 1 | 9972 |
import statsmodels.api
import statsmodels.genmod.families.family
import numpy as np
from sklearn.metrics import r2_score
class GLM(object):
'''
A scikit-learn style wrapper for statsmodels.api.GLM. The purpose of this class is to
make generalized linear models compatible with scikit-learn's Pipeline objects.
family : instance of subclass of statsmodels.genmod.families.family.Family
The family argument determines the distribution family to use for GLM fitting.
xlabels : iterable of strings, optional (empty by default)
The xlabels argument can be used to assign names to data columns. This argument is not
generally needed, as names can be captured automatically from most standard data
structures. If included, must have length n, where n is the number of features. Note
that column order is used to compute term values and make predictions, not column names.
'''
def __init__(self, family, add_constant=True):
self.family = family
self.add_constant = add_constant
def _scrub_x(self, X, offset, exposure, **kwargs):
'''
Sanitize input predictors and extract column names if appropriate.
'''
no_labels = False
if 'xlabels' not in kwargs and 'xlabels' not in self.__dict__:
#Try to get xlabels from input data (for example, if X is a pandas DataFrame)
try:
self.xlabels = list(X.columns)
except AttributeError:
try:
self.xlabels = list(X.design_info.column_names)
except AttributeError:
try:
self.xlabels = list(X.dtype.names)
except TypeError:
no_labels = True
elif 'xlabels' not in self.__dict__:
self.xlabels = kwargs['xlabels']
#Convert to internally used data type
X = np.asarray(X,dtype=np.float64)
m,n = X.shape
if offset is not None:
offset = np.asarray(offset,dtype=np.float64)
offset = offset.reshape(offset.shape[0])
if exposure is not None:
exposure = np.asarray(exposure,dtype=np.float64)
exposure = exposure.reshape(exposure.shape[0])
#Make up labels if none were found
if no_labels:
self.xlabels = ['x'+str(i) for i in range(n)]
return X, offset, exposure
def _scrub(self, X, y, offset, exposure, **kwargs):
'''
Sanitize input data.
'''
#Check whether X is the output of patsy.dmatrices
if y is None and type(X) is tuple:
y, X = X
#Handle X separately
X, offset, exposure = self._scrub_x(X, offset, exposure, **kwargs)
#Convert y to internally used data type
y = np.asarray(y,dtype=np.float64)
y = y.reshape(y.shape[0])
#Make sure dimensions match
if y.shape[0] != X.shape[0]:
raise ValueError('X and y do not have compatible dimensions.')
return X, y, offset, exposure
def fit(self, X, y = None, offset = None, exposure = None, xlabels = None):
'''
Fit a GLM model to the input data X and y.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the number of samples
The training response. The y parameter can be a numpy array, a pandas DataFrame with one
column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a
call to patsy.dmatrices (in which case, X contains the response).
xlabels : iterable of strings, optional (default=None)
Convenient way to set the xlabels parameter while calling fit. Ignored if None (default).
See the GLM class for an explanation of the xlabels parameter.
'''
#Format and label the data
if xlabels is not None:
self.set_params(xlabels=xlabels)
X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__)
#Add a constant column
if self.add_constant:
X = statsmodels.api.add_constant(X, prepend=True)
#Do the actual work
model = statsmodels.api.GLM(y, X, self.family, offset=offset, exposure=exposure)
result = model.fit()
self.coef_ = result.params
return self
def predict(self, X, offset = None, exposure = None):
'''
Predict the response based on the input data X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a
patsy DesignMatrix.
'''
#Format the data
X, offset, exposure = self._scrub_x(X, offset, exposure)
#Linear transformation
eta = self.transform(X, offset, exposure)
#Nonlinear transformation
y_hat = self.family.fitted(eta)
return y_hat
def transform(self, X, offset = None, exposure = None):
'''
Perform a linear transformation of X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a
patsy DesignMatrix.
'''
#Format the data
X, offset, exposure = self._scrub_x(X, offset, exposure)
#Add a constant column
if self.add_constant:
X = statsmodels.api.add_constant(X, prepend=True)
#Compute linear combination
eta = np.dot(X,self.coef_)
#Apply offset and exposure
if offset is not None:
eta += offset
if exposure is not None:
eta += np.log(exposure)
return eta
def score(self, X, y = None, offset = None, exposure = None, xlabels = None):
X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__)
y_pred = self.predict(X, offset=offset, exposure=exposure)
return r2_score(y, y_pred)
def get_params(self, deep = False):
return {}
def __repr__(self):
return self.__class__.__name__ + '()'
def __str__(self):
return self.__class__.__name__ + '()'
class GLMFamily(GLM):
family = NotImplemented
def __init__(self, add_constant=True):
super(GLMFamily,self).__init__(family=self.__class__.family(), add_constant=add_constant)
class BinomialRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Binomial
class GammaRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Gamma
class GaussianRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Gaussian
class InverseGaussianRegressor(GLMFamily):
family = statsmodels.genmod.families.family.InverseGaussian
class NegativeBinomialRegressor(GLMFamily):
family = statsmodels.genmod.families.family.NegativeBinomial
class PoissonRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Poisson
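# A minimal usage sketch (not part of the original module; the synthetic data
# and the scaler step are illustrative only):
#
#   import numpy as np
#   from sklearn.pipeline import Pipeline
#   from sklearn.preprocessing import StandardScaler
#
#   X = np.random.rand(100, 3)
#   y = np.random.poisson(lam=np.exp(X.sum(axis=1)))
#   model = Pipeline([('scale', StandardScaler()),
#                     ('glm', PoissonRegressor())])
#   model.fit(X, y)
#   y_hat = model.predict(X)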
# def fit(self, X, y = None, exposure = None, xlabels = None):
# '''
# Fit a GLM model to the input data X and y.
#
#
# Parameters
# ----------
# X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
# The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy
# DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices.
#
#
# y : array-like, optional (default=None), shape = [m] where m is the number of samples
# The training response. The y parameter can be a numpy array, a pandas DataFrame with one
# column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a
# call to patsy.dmatrices (in which case, X contains the response).
#
#
# xlabels : iterable of strings, optional (default=None)
# Convenient way to set the xlabels parameter while calling fit. Ignored if None (default).
# See the GLM class for an explanation of the xlabels parameter.
#
# '''
# #Format and label the data
# if xlabels is not None:
# self.set_params(xlabels=xlabels)
# X, y = self._scrub(X,y,**self.__dict__)
# if exposure is not None:
# exposure = np.asarray(exposure)
# exposure = exposure.reshape(exposure.shape[0])
# if exposure.shape != y.shape:
# raise ValueError('Shape of exposure does not match shape of y.')
#
# #Add a constant column
# if self.add_constant:
# X = statsmodels.api.add_constant(X, prepend=True)
#
# #Do the actual work
# if exposure is None:
# model = statsmodels.api.GLM(y, X, self.family)
# else:
# model = statsmodels.api.GLM(y, X, self.family, exposure=exposure)
# result = model.fit()
# self.coef_ = result.params
#
# return self
| bsd-3-clause |
valsusa/PICronos | Explicit/1d1v-python/landau_damping.py | 1 | 5578 | """
1D electrostatic particle-in-cell solver for studying the Landau damping.
Translation of the landau.m MATLAB routine by G. Lapenta.
E. Boella: [email protected]
"""
import os, time
start_time = time.clock()
import numpy as np #array syntax
import pylab as plt #plot
import matplotlib.patches as mpatches #plot
import scipy
import scipy.fftpack
from scipy import sparse #special functions, optimization, linear algebra
from scipy.sparse import linalg
from scipy.linalg import norm
# Output folder
#path = './Results'
#if not os.path.exists(path):
# os.makedirs(path)
# Set plotting parameters
params = {'axes.labelsize': 'large',
'xtick.labelsize': 'medium',
'ytick.labelsize': 'medium',
'font.size': 15,
'font.family': 'sans-serif',
'text.usetex': False,
'mathtext.fontset': 'stixsans',}
plt.rcParams.update(params)
## Switch on interactive plotting mode
plt.ion()
# Simulation parameters
L = 12. # Domain size
DT = 0.1 # Time step
NT = 200 # Number of time steps
TOut = round(NT/100) # Output period
verbose = True
NG = 60 # Number of grid cells
N = NG * 500 # Number of particles
WP = 1 # Plasma frequency
QM = -1. # Charge/mass ratio
VT = 1. # Thermal speed
# perturbation
VP1 = 0.5 * VT
mode = 1
Q = WP**2 / (QM*N/L) # rho0*L/N: charge carried by a single particle?
rho_back = -Q*N/L # Background charge density?
dx = L / NG # Grid step
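# For reference (an analytic textbook estimate, not used anywhere in this code):
# with k = 2 * np.pi * mode / L and lambda_D = VT / WP, linear Landau damping of
# a Maxwellian plasma predicts a field decay rate of roughly
#   gamma ~ sqrt(pi / 8) * WP / (k * lambda_D)**3 * exp(-1. / (2 * (k * lambda_D)**2) - 1.5)
# which can be compared against the decay of norm(Phi) saved at the end of the run.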
# Auxiliary vectors
p = np.concatenate([np.arange(N), np.arange(N)]) # Some indices up to N
Poisson = sparse.spdiags(([1, -2, 1] * np.ones((1, NG-1), dtype=int).T).T, [-1, 0, 1], NG-1, NG-1)
Poisson = Poisson.tocsc()
# Cell center coordinates
xg = np.linspace(0, L-dx, NG) + dx/2
# electrons
xp = np.linspace(0, L-L/N, N).T # Particle positions
vp = VT * np.random.randn(N) # particle thermal spread
# Add electron perturbation to excite the desired mode
vp += VP1 * np.cos(2 * np.pi * xp / L * mode)
xp[np.where(xp < 0)] += L
xp[np.where(xp >= L)] -= L
histEnergy, histPotE, histKinE, Ep, normphi, t = [], [], [], [], [], []
if verbose:
plt.figure(1, figsize=(16,9))
# Main cycle
for it in xrange(NT+1):
# update particle position xp
xp += vp * DT
# Periodic boundary condition
xp[np.where(xp < 0)] += L
xp[np.where(xp >= L)] -= L
# Project particles->grid
g1 = np.floor(xp/dx - 0.5)
g = np.concatenate((g1, g1+1))
fraz1 = 1 - np.abs(xp/dx - g1 - 0.5)
fraz = np.concatenate((fraz1, 1-fraz1))
g[np.where(g < 0)] += NG
g[np.where(g > NG-1)] -= NG
mat = sparse.csc_matrix((fraz, (p, g)), shape=(N, NG))
rho = Q / dx * mat.toarray().sum(axis=0) + rho_back
# Compute electric field potential
Phi = linalg.spsolve(Poisson, -dx**2 * rho[0:NG-1])
Phi = np.concatenate((Phi,[0]))
normphi.append(norm(Phi))
# Electric field on the grid
Eg = (np.roll(Phi, 1) - np.roll(Phi, -1)) / (2*dx)
Ep.append(Eg[round(NG/2)])
# Electric field fft
ft = abs(scipy.fft(Eg))
k = scipy.fftpack.fftfreq(Eg.size,xg[1]-xg[0])
# interpolation grid->particle and velocity update
vp += mat * QM * Eg * DT
bins,edges=np.histogram(vp,bins=40,range=(-3.2,3.2))
left,right = edges[:-1],edges[1:]
vc = np.array([left,right]).T.flatten()
fv = np.array([bins,bins]).T.flatten()
Etot = 0.5 * (Eg**2).sum() * dx
histEnergy.append(Etot+0.5 * Q/QM * (vp**2).sum())
histPotE.append(0.5 * (Eg**2).sum() * dx)
histKinE.append(0.5 * Q/QM * (vp**2).sum())
t.append(it*DT)
if (np.mod(it, TOut) == 0) and verbose:
# Phase space
plt.clf()
plt.subplot(2, 2, 1)
plt.scatter(xp[0:-1:2], vp[0:-1:2], s=0.5, marker='.', color='blue')
plt.xlim(0, L)
plt.ylim(-6, 6)
plt.xlabel('x')
plt.ylabel('v')
plt.legend((mpatches.Patch(color='w'), ), (r'$\omega_{pe}t=$' + str(DT*it), ), loc=1, frameon=False)
# Electric field
plt.subplot(2, 2, 2)
plt.xlim(0, 15)
plt.ylim(0, 50)
plt.xlabel('x')
plt.plot(L*k, ft, label='fft(E)', linewidth=2)
plt.legend(loc=1)
# Energies
plt.subplot(2, 2, 3)
plt.xlim(0, NT*DT)
plt.ylim(1e-5, 100)
plt.xlabel('time')
plt.yscale('log')
plt.plot(t, histPotE, label='Potential', linewidth=2)
plt.plot(t, histKinE, label='Kinetic', linewidth=2)
plt.plot(t, histEnergy, label='Total Energy', linestyle='--', linewidth=2)
plt.legend(loc=4)
plt.subplot(2, 2, 4)
plt.xlim(0, NT*DT)
plt.ylim(-0.5, 0.5)
plt.xlabel('time')
#plt.yscale('log')
plt.plot(t,Ep, label='E(x=L/2)', linewidth=2)
plt.legend(loc=1)
plt.pause(0.000000000000001)
print it
#plt.savefig(os.path.join(path, 'twostream%3.3i' % (it/TOut,) + '.png'))
np.savetxt('norm_phi.txt',(t,normphi))
print 'Time elapsed: ', time.clock() - start_time
# Comment this line if you want the figure to automatically close at the end of the simulation
raw_input('Press enter...')
| lgpl-3.0 |
richrr/scripts | python/make_convex_hull_pcoa_plot.py | 1 | 3870 | #!/usr/bin/env python
# File created on 09 Jul 2013
from __future__ import division
#https://groups.google.com/forum/#!searchin/qiime-forum/PCoA/qiime-forum/zigFP_wKaps/QPzKZgQW55IJ
# adapted from
__author__ = "Yoshiki Vazquez Baeza"
__copyright__ = "Copyright 2013, ApocaQIIME"
__credits__ = ["Yoshiki Vazquez Baeza"]
__license__ = "GPL"
__version__ = "1.7.0"
__maintainer__ = "Yoshiki Vazquez Baeza"
__email__ = "[email protected]"
__status__ = "Use at your own risk"
# the creation of the convex hull is based on the example as provided by scipy
# see this URL http://docs.scipy.org/doc/scipy-dev/reference/generated/scipy.spatial.ConvexHull.html
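# A minimal, self-contained sketch of that scipy usage (the random points are
# illustrative; this script instead feeds in the per-category PCoA coordinates):
#
#   import numpy as np
#   from scipy.spatial import ConvexHull
#   points = np.random.rand(30, 2)
#   hull = ConvexHull(points)
#   # hull.simplices holds index pairs describing the hull edges,
#   # hull.vertices the indices of the hull's corner points.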
import os
import sys
sys.path.insert(0,"/usr/lib/python2.7/dist-packages/")
from scipy.spatial import ConvexHull
import numpy as np
import matplotlib.pyplot as plt
from qiime.sort import natsort
from qiime.parse import parse_coords, parse_mapping_file
from qiime.util import (parse_command_line_parameters, make_option, qiime_system_call)
from qiime.colors import get_qiime_hex_string_color
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
script_info['script_usage'] = [("","","")]
script_info['output_description']= ""
script_info['required_options'] = [
make_option('-i','--coordinates_fp', type="existing_filepath",help='the '
'input coordinates filepath'),
make_option('-m','--mapping_file_fp', type="existing_filepath",help='the '
'mapping file filepath'),
make_option('-c','--category',type="string",help='header name of the '
'category of interest that you want the convex hulls to be created on')
]
script_info['optional_options'] = [
make_option('-o','--output_fp', type="new_filepath", help='filename and '
'format for the plot, the extension will determine the format of the '
'output file (pdf, eps or png)', default='convex_hull.pdf')
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
coordinates_fp = opts.coordinates_fp
mapping_file_fp = opts.mapping_file_fp
category_header_name = opts.category
output_fp = opts.output_fp
coords_headers, coords_data, coords_eigenvalues, coords_percents = parse_coords(open(coordinates_fp, 'U'))
mapping_data, mapping_headers, _ = parse_mapping_file(open(mapping_file_fp, 'U'))
category_header_index = mapping_headers.index(category_header_name)
category_names = list(set([line[category_header_index] for line in mapping_data]))
xtitle = 'PC1 (%.0f%%)' % round(coords_percents[0])
ytitle = 'PC2 (%.0f%%)' % round(coords_percents[1])
main_figure = plt.figure()
main_axes = main_figure.add_subplot(1, 1, 1, axisbg='white')
plt.xlabel(xtitle)
plt.ylabel(ytitle)
main_axes.tick_params(axis='y')
main_axes.tick_params(axis='x')
# sort the data!!! that way you can match make_3d_plots.py
for index, category in enumerate(natsort(category_names)):
sample_ids_list = [line[0] for line in mapping_data if line[category_header_index] == category]
qiime_color = get_qiime_hex_string_color(index)
if len(sample_ids_list) < 3:
continue
indices = [coords_headers.index(sample_id) for sample_id in sample_ids_list]
points = coords_data[indices, :2]# * coords_percents[:2]
hull = ConvexHull(points)
main_axes.plot(points[:,0], points[:,1], 'o', color=qiime_color)
for simplex in hull.simplices:
main_axes.plot(points[simplex,0], points[simplex,1], 'k-')
main_axes.plot(points[hull.vertices,0], points[hull.vertices,1], '--', lw=2, color=qiime_color)
# plt.plot(points[hull.vertices[0],0], points[hull.vertices[0],1], '--', color=qiime_color)
#plt.show()
main_figure.savefig(output_fp)
if __name__ == "__main__":
main() | gpl-3.0 |
ycaihua/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
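# For reference (not part of the original example), GraphLassoCV below solves
# the l1-penalized maximum-likelihood problem
#
#     K_hat = argmin_{K > 0}  tr(S K) - log det K + alpha * ||K||_1
#
# where S is the empirical covariance, K the precision matrix and, in
# scikit-learn's formulation, the l1 norm runs over the off-diagonal entries.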
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/cluster/mean_shift_.py | 15 | 15507 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=1):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile),
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_mean_shift.py for an example.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
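# A minimal usage sketch of the two public helpers above (the synthetic blobs
# and the quantile value are illustrative only):
#
#   import numpy as np
#   X = np.vstack([np.random.randn(100, 2),
#                  np.random.randn(100, 2) + [5, 5]])
#   bandwidth = estimate_bandwidth(X, quantile=0.2)
#   cluster_centers, labels = mean_shift(X, bandwidth=bandwidth, bin_seeding=True)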
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
        Only bins with at least min_bin_freq points will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
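# Illustrative sketch (not part of the public scikit-learn API) of the seeding
# helper above: points are snapped to a grid of spacing ``bin_size`` and only
# bins holding at least ``min_bin_freq`` points survive as seeds.
def _example_get_bin_seeds():
    import numpy as np
    X = np.array([[1.0, 1.0], [1.1, 1.1], [0.9, 1.2], [10.0, 10.0]])
    # The first three points share the grid cell (1, 1); the isolated point's
    # bin holds a single member and is dropped, leaving one seed at (1.0, 1.0).
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)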
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the seed points in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
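# Illustrative sketch (not part of the public scikit-learn API) of the
# estimator interface above: fit on synthetic blobs, then assign new samples
# to the nearest discovered mode.  Data layout and bandwidth are assumptions.
def _example_meanshift_estimator():
    import numpy as np
    rng = np.random.RandomState(42)
    X = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + 6])
    ms = MeanShift(bandwidth=2.0, bin_seeding=True)
    ms.fit(X)
    # ``cluster_centers_`` holds one row per mode; ``predict`` maps new
    # samples to the closest center.
    new_labels = ms.predict(np.array([[0.0, 0.0], [6.0, 6.0]]))
    return ms.cluster_centers_, new_labels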
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/markers.py | 10 | 26324 | """
This module contains functions to handle markers. Used by both the
marker functionality of `~matplotlib.axes.Axes.plot` and
`~matplotlib.axes.Axes.scatter`.
All possible markers are defined here:
============================== ===============================================
marker description
============================== ===============================================
"." point
"," pixel
"o" circle
"v" triangle_down
"^" triangle_up
"<" triangle_left
">" triangle_right
"1" tri_down
"2" tri_up
"3" tri_left
"4" tri_right
"8" octagon
"s" square
"p" pentagon
"*" star
"h" hexagon1
"H" hexagon2
"+" plus
"x" x
"D" diamond
"d" thin_diamond
"|" vline
"_" hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
"None" nothing
None nothing
" " nothing
"" nothing
``'$...$'`` render the string using mathtext.
`verts` a list of (x, y) pairs used for Path vertices.
The center of the marker is located at (0,0) and
the size is normalized.
path a `~matplotlib.path.Path` instance.
(`numsides`, `style`, `angle`) see below
============================== ===============================================
The marker can also be a tuple (`numsides`, `style`, `angle`), which
will create a custom, regular symbol.
`numsides`:
the number of sides
`style`:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (`numsides` and `angle` is ignored)
===== =============================================
`angle`:
the angle of rotation of the symbol, in degrees
For backward compatibility, the form (`verts`, 0) is also accepted,
but it is equivalent to just `verts` for giving a raw set of vertices
that define the shape.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from .cbook import is_math_text, is_string_like, is_numlike, iterable
from matplotlib import rcParams
from .path import Path
from .transforms import IdentityTransform, Affine2D
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = list(xrange(8))
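# Illustrative sketch (not part of this module): how the marker codes from the
# table in the module docstring are typically passed to the pyplot API.  The
# data values are arbitrary and a working matplotlib backend is assumed;
# pyplot is imported locally to avoid a circular import at module load time.
def _example_marker_codes():
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.arange(5)
    plt.plot(x, x, linestyle='', marker='o')              # named marker: circle
    plt.plot(x, x + 1, linestyle='', marker='$\\alpha$')  # mathtext marker
    plt.plot(x, x + 2, linestyle='', marker=(5, 1, 30))   # 5-point star, rotated 30 deg
    plt.show()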
class MarkerStyle(object):
markers = {
'.': 'point',
',': 'pixel',
'o': 'circle',
'v': 'triangle_down',
'^': 'triangle_up',
'<': 'triangle_left',
'>': 'triangle_right',
'1': 'tri_down',
'2': 'tri_up',
'3': 'tri_left',
'4': 'tri_right',
'8': 'octagon',
's': 'square',
'p': 'pentagon',
'*': 'star',
'h': 'hexagon1',
'H': 'hexagon2',
'+': 'plus',
'x': 'x',
'D': 'diamond',
'd': 'thin_diamond',
'|': 'vline',
'_': 'hline',
TICKLEFT: 'tickleft',
TICKRIGHT: 'tickright',
TICKUP: 'tickup',
TICKDOWN: 'tickdown',
CARETLEFT: 'caretleft',
CARETRIGHT: 'caretright',
CARETUP: 'caretup',
CARETDOWN: 'caretdown',
"None": 'nothing',
None: 'nothing',
' ': 'nothing',
'': 'nothing'
}
# Just used for informational purposes. is_filled()
# is calculated in the _set_* functions.
filled_markers = (
'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd')
fillstyles = ('full', 'left', 'right', 'bottom', 'top', 'none')
_half_fillstyles = ('left', 'right', 'bottom', 'top')
# TODO: Is this ever used as a non-constant?
_point_size_reduction = 0.5
def __init__(self, marker=None, fillstyle='full'):
"""
MarkerStyle
Attributes
----------
        markers : list of known markers
fillstyles : list of known fillstyles
filled_markers : list of known filled markers.
Parameters
----------
marker : string or array_like, optional, default: None
See the descriptions of possible markers in the module docstring.
fillstyle : string, optional, default: 'full'
            'full', 'left', 'right', 'bottom', 'top', 'none'
"""
self._fillstyle = fillstyle
self.set_marker(marker)
self.set_fillstyle(fillstyle)
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_marker_function')
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
self.set_marker(self._marker)
self._recache()
def _recache(self):
self._path = Path(np.empty((0, 2)))
self._transform = IdentityTransform()
self._alt_path = None
self._alt_transform = None
self._snap_threshold = None
self._joinstyle = 'round'
self._capstyle = 'butt'
self._filled = True
self._marker_function()
if six.PY3:
def __bool__(self):
return bool(len(self._path.vertices))
else:
def __nonzero__(self):
return bool(len(self._path.vertices))
def is_filled(self):
return self._filled
def get_fillstyle(self):
return self._fillstyle
def set_fillstyle(self, fillstyle):
"""
Sets fillstyle
Parameters
----------
fillstyle : string amongst known fillstyles
"""
if fillstyle not in self.fillstyles:
raise ValueError("Unrecognized fillstyle %s"
% ' '.join(self.fillstyles))
self._fillstyle = fillstyle
self._recache()
def get_joinstyle(self):
return self._joinstyle
def get_capstyle(self):
return self._capstyle
def get_marker(self):
return self._marker
def set_marker(self, marker):
if (iterable(marker) and len(marker) in (2, 3) and
marker[1] in (0, 1, 2, 3)):
self._marker_function = self._set_tuple_marker
elif isinstance(marker, np.ndarray):
self._marker_function = self._set_vertices
elif not isinstance(marker, list) and marker in self.markers:
self._marker_function = getattr(
self, '_set_' + self.markers[marker])
elif is_string_like(marker) and is_math_text(marker):
self._marker_function = self._set_mathtext_path
elif isinstance(marker, Path):
self._marker_function = self._set_path_marker
else:
try:
Path(marker)
self._marker_function = self._set_vertices
except ValueError:
raise ValueError('Unrecognized marker style {}'.format(marker))
self._marker = marker
self._recache()
def get_path(self):
return self._path
def get_transform(self):
return self._transform.frozen()
def get_alt_path(self):
return self._alt_path
def get_alt_transform(self):
return self._alt_transform.frozen()
def get_snap_threshold(self):
return self._snap_threshold
def _set_nothing(self):
self._filled = False
def _set_custom_marker(self, path):
verts = path.vertices
rescale = max(np.max(np.abs(verts[:, 0])),
np.max(np.abs(verts[:, 1])))
self._transform = Affine2D().scale(0.5 / rescale)
self._path = path
def _set_path_marker(self):
self._set_custom_marker(self._marker)
def _set_vertices(self):
verts = self._marker
marker = Path(verts)
self._set_custom_marker(marker)
def _set_tuple_marker(self):
marker = self._marker
if is_numlike(marker[0]):
if len(marker) == 2:
numsides, rotation = marker[0], 0.0
elif len(marker) == 3:
numsides, rotation = marker[0], marker[2]
symstyle = marker[1]
if symstyle == 0:
self._path = Path.unit_regular_polygon(numsides)
self._joinstyle = 'miter'
elif symstyle == 1:
self._path = Path.unit_regular_star(numsides)
self._joinstyle = 'bevel'
elif symstyle == 2:
self._path = Path.unit_regular_asterisk(numsides)
self._filled = False
self._joinstyle = 'bevel'
elif symstyle == 3:
self._path = Path.unit_circle()
self._transform = Affine2D().scale(0.5).rotate_deg(rotation)
else:
verts = np.asarray(marker[0])
path = Path(verts)
self._set_custom_marker(path)
def _set_mathtext_path(self):
"""
        Draws mathtext markers '$...$' using a TextPath object.
Submitted by tcb
"""
from matplotlib.text import TextPath
from matplotlib.font_manager import FontProperties
# again, the properties could be initialised just once outside
# this function
# Font size is irrelevant here, it will be rescaled based on
# the drawn size later
props = FontProperties(size=1.0)
text = TextPath(xy=(0, 0), s=self.get_marker(), fontproperties=props,
usetex=rcParams['text.usetex'])
if len(text.vertices) == 0:
return
xmin, ymin = text.vertices.min(axis=0)
xmax, ymax = text.vertices.max(axis=0)
width = xmax - xmin
height = ymax - ymin
max_dim = max(width, height)
self._transform = Affine2D() \
.translate(-xmin + 0.5 * -width, -ymin + 0.5 * -height) \
.scale(1.0 / max_dim)
self._path = text
self._snap = False
def _half_fill(self):
fs = self.get_fillstyle()
result = fs in self._half_fillstyles
return result
def _set_circle(self, reduction=1.0):
self._transform = Affine2D().scale(0.5 * reduction)
self._snap_threshold = 6.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_circle()
else:
# build a right-half circle
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._path = self._alt_path = Path.unit_circle_righthalf()
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform.frozen().rotate_deg(180.)
def _set_pixel(self):
self._path = Path.unit_rectangle()
# Ideally, you'd want -0.5, -0.5 here, but then the snapping
# algorithm in the Agg backend will round this to a 2x2
# rectangle from (-1, -1) to (1, 1). By offsetting it
# slightly, we can force it to be (0, 0) to (1, 1), which both
# makes it only be a single pixel and places it correctly
# aligned to 1-width stroking (i.e. the ticks). This hack is
# the best of a number of bad alternatives, mainly because the
# backends are not aware of what marker is actually being used
# beyond just its path data.
self._transform = Affine2D().translate(-0.49999, -0.49999)
self._snap_threshold = None
def _set_point(self):
self._set_circle(reduction=self._point_size_reduction)
_triangle_path = Path(
[[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
    # Going down halfway looks too small. Golden ratio is too far.
_triangle_path_u = Path(
[[0.0, 1.0], [-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_d = Path(
[[-3 / 5., -1 / 5.], [3 / 5., -1 / 5.], [1.0, -1.0], [-1.0, -1.0],
[-3 / 5., -1 / 5.]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_l = Path(
[[0.0, 1.0], [0.0, -1.0], [-1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
_triangle_path_r = Path(
[[0.0, 1.0], [0.0, -1.0], [1.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
def _set_triangle(self, rot, skip):
self._transform = Affine2D().scale(0.5, 0.5).rotate_deg(rot)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = self._triangle_path
else:
mpaths = [self._triangle_path_u,
self._triangle_path_l,
self._triangle_path_d,
self._triangle_path_r]
if fs == 'top':
self._path = mpaths[(0 + skip) % 4]
self._alt_path = mpaths[(2 + skip) % 4]
elif fs == 'bottom':
self._path = mpaths[(2 + skip) % 4]
self._alt_path = mpaths[(0 + skip) % 4]
elif fs == 'left':
self._path = mpaths[(1 + skip) % 4]
self._alt_path = mpaths[(3 + skip) % 4]
else:
self._path = mpaths[(3 + skip) % 4]
self._alt_path = mpaths[(1 + skip) % 4]
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_triangle_up(self):
return self._set_triangle(0.0, 0)
def _set_triangle_down(self):
return self._set_triangle(180.0, 2)
def _set_triangle_left(self):
return self._set_triangle(90.0, 3)
def _set_triangle_right(self):
return self._set_triangle(270.0, 1)
def _set_square(self):
self._transform = Affine2D().translate(-0.5, -0.5)
self._snap_threshold = 2.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
# build a bottom filled square out of two rectangles, one
# filled. Use the rotation to support left, right, bottom
# or top
if fs == 'bottom':
rotate = 0.
elif fs == 'top':
rotate = 180.
elif fs == 'left':
rotate = 270.
else:
rotate = 90.
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 0.5],
[0.0, 0.5], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.5], [1.0, 0.5], [1.0, 1.0],
[0.0, 1.0], [0.0, 0.5]])
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_diamond(self):
self._transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
if not self._half_fill():
self._path = Path.unit_rectangle()
else:
self._path = Path([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
self._alt_path = Path([[0.0, 0.0], [0.0, 1.0],
[1.0, 1.0], [0.0, 0.0]])
if fs == 'bottom':
rotate = 270.
elif fs == 'top':
rotate = 90.
elif fs == 'left':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_thin_diamond(self):
self._set_diamond()
self._transform.scale(0.6, 1.0)
def _set_pentagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
polypath = Path.unit_regular_polygon(5)
fs = self.get_fillstyle()
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
y = (1 + np.sqrt(5)) / 4.
top = Path([verts[0], verts[1], verts[4], verts[0]])
bottom = Path([verts[1], verts[2], verts[3], verts[4], verts[1]])
left = Path([verts[0], verts[1], verts[2], [0, -y], verts[0]])
right = Path([verts[0], verts[4], verts[3], [0, -y], verts[0]])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_star(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_star(5, innerCircle=0.381966)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
top = Path(np.vstack((verts[0:4, :], verts[7:10, :], verts[0])))
bottom = Path(np.vstack((verts[3:8, :], verts[3])))
left = Path(np.vstack((verts[0:6, :], verts[0])))
right = Path(np.vstack((verts[0], verts[5:10, :], verts[0])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'bevel'
def _set_hexagon1(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x = np.abs(np.cos(5 * np.pi / 6.))
top = Path(np.vstack(([-x, 0], verts[(1, 0, 5), :], [x, 0])))
bottom = Path(np.vstack(([-x, 0], verts[2:5, :], [x, 0])))
left = Path(verts[(0, 1, 2, 3), :])
right = Path(verts[(0, 5, 4, 3), :])
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_hexagon2(self):
self._transform = Affine2D().scale(0.5).rotate_deg(30)
self._snap_threshold = None
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(6)
if not self._half_fill():
self._path = polypath
else:
verts = polypath.vertices
# not drawing inside lines
x, y = np.sqrt(3) / 4, 3 / 4.
top = Path(verts[(1, 0, 5, 4, 1), :])
bottom = Path(verts[(1, 2, 3, 4), :])
left = Path(np.vstack(([x, y], verts[(0, 1, 2), :],
[-x, -y], [x, y])))
right = Path(np.vstack(([x, y], verts[(5, 4, 3), :], [-x, -y])))
if fs == 'top':
mpath, mpath_alt = top, bottom
elif fs == 'bottom':
mpath, mpath_alt = bottom, top
elif fs == 'left':
mpath, mpath_alt = left, right
else:
mpath, mpath_alt = right, left
self._path = mpath
self._alt_path = mpath_alt
self._alt_transform = self._transform
self._joinstyle = 'miter'
def _set_octagon(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
fs = self.get_fillstyle()
polypath = Path.unit_regular_polygon(8)
if not self._half_fill():
self._transform.rotate_deg(22.5)
self._path = polypath
else:
x = np.sqrt(2.) / 4.
half = Path([[0, -1], [0, 1], [-x, 1], [-1, x],
[-1, -x], [-x, -1], [0, -1]])
if fs == 'bottom':
rotate = 90.
elif fs == 'top':
rotate = 270.
elif fs == 'right':
rotate = 180.
else:
rotate = 0.
self._transform.rotate_deg(rotate)
self._path = self._alt_path = half
self._alt_transform = self._transform.frozen().rotate_deg(180.0)
self._joinstyle = 'miter'
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _set_vline(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
def _set_hline(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 1.0
self._filled = False
self._path = self._line_marker_path
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _set_tickleft(self):
self._transform = Affine2D().scale(-1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
def _set_tickright(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickhoriz_path
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _set_tickup(self):
self._transform = Affine2D().scale(1.0, 1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
def _set_tickdown(self):
self._transform = Affine2D().scale(1.0, -1.0)
self._snap_threshold = 1.0
self._filled = False
self._path = self._tickvert_path
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_plus(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 1.0
self._filled = False
self._path = self._plus_path
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_tri_down(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_up(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_left(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
def _set_tri_right(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 5.0
self._filled = False
self._path = self._tri_path
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _set_caretdown(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretup(self):
self._transform = Affine2D().scale(0.5).rotate_deg(180)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretleft(self):
self._transform = Affine2D().scale(0.5).rotate_deg(270)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
def _set_caretright(self):
self._transform = Affine2D().scale(0.5).rotate_deg(90)
self._snap_threshold = 3.0
self._filled = False
self._path = self._caret_path
self._joinstyle = 'miter'
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _set_x(self):
self._transform = Affine2D().scale(0.5)
self._snap_threshold = 3.0
self._filled = False
self._path = self._x_path
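# Illustrative sketch (not part of the matplotlib API) of the MarkerStyle
# object defined above: resolve a marker code into its path and transform.
def _example_markerstyle():
    style = MarkerStyle(marker='D', fillstyle='left')
    path = style.get_path()            # the Path describing the diamond
    transform = style.get_transform()  # scaling/rotation into marker space
    return style.is_filled(), path, transform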
| gpl-2.0 |
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/build/lib.linux-x86_64-2.7/kwiklib/dataio/tests/mock_data.py | 2 | 8723 | """Functions that generate mock data."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import time
import numpy as np
import numpy.random as rnd
import pandas as pd
import shutil
import tempfile
from kwiklib.utils.colors import COLORS_COUNT
from kwiklib.dataio import (save_binary, save_text, check_dtype,
check_shape, save_cluster_info, save_group_info)
from kwiklib.utils import logger as log
# -----------------------------------------------------------------------------
# Global variables
# -----------------------------------------------------------------------------
# Mock parameters.
nspikes = 1000
nclusters = 20
nextrafet = 1
ngroups = 4
nchannelgroups = 4
cluster_offset = 2
channel_offset = 2
nsamples = 20
ncorrbins = 100
corrbin = .001
nchannels = 32
fetdim = 3
duration = 1.
freq = 20000.
TEST_FOLDER = tempfile.mkdtemp()
# if not os.path.exists(TEST_FOLDER):
# os.mkdir(TEST_FOLDER)
# -----------------------------------------------------------------------------
# Data creation methods
# -----------------------------------------------------------------------------
def create_waveforms(nspikes, nsamples, nchannels):
t = np.linspace(-np.pi, np.pi, nsamples)
t = t.reshape((1, -1, 1))
# Sinus shaped random waveforms.
return (np.array(rnd.randint(size=(nspikes, nsamples, nchannels),
low=-32768 // 2, high=32768 // 2), dtype=np.int16) -
np.array(32768 // 2 * (.5 + .5 * rnd.rand()) * np.cos(t),
dtype=np.int16))
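# Illustrative check (not part of the original fixtures): the generated
# waveform block is an int16 array of shape (nspikes, nsamples, nchannels).
def _example_create_waveforms():
    w = create_waveforms(10, 20, 4)
    assert w.shape == (10, 20, 4)
    assert w.dtype == np.int16
    return w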
def create_trace(nsamples, nchannels):
noise = np.array(rnd.randint(size=(nsamples, nchannels),
low=-1000, high=1000), dtype=np.int16)
t = np.linspace(0., 100., nsamples)
low = np.array(10000 * np.cos(t), dtype=np.int16)
return noise + low[:, np.newaxis]
def create_features(nspikes, nchannels, fetdim, duration, freq):
features = np.array(rnd.randint(size=(nspikes, nchannels * fetdim + 1),
low=-1e5, high=1e5), dtype=np.float32)
features[:, -1] = np.sort(np.random.randint(size=nspikes, low=0,
high=duration * freq))
return features
def create_clusters(nspikes, nclusters, cluster_offset=cluster_offset):
# Add shift in cluster indices to test robustness.
return rnd.randint(size=nspikes, low=cluster_offset,
high=nclusters + cluster_offset)
# ClusterView
def create_cluster_colors(nclusters):
return np.mod(np.arange(nclusters, dtype=np.int32), COLORS_COUNT) + 1
def create_group_colors(ngroups):
return np.mod(np.arange(ngroups, dtype=np.int32), COLORS_COUNT) + 1
def create_group_names(ngroups):
return ["Group {0:d}".format(group) for group in xrange(ngroups)]
def create_cluster_groups(nclusters):
return np.array(np.random.randint(size=nclusters, low=0, high=4),
dtype=np.int32)
# ChannelView
def create_channel_names(nchannels):
return ["Channel {0:d}".format(channel) for channel in xrange(nchannels)]
def create_channel_colors(nchannels):
return np.mod(np.arange(nchannels, dtype=np.int32), COLORS_COUNT) + 1
def create_channel_group_names(nchannelgroups):
return ["Group {0:d}".format(channelgroup) for channelgroup in xrange(nchannelgroups)]
def create_channel_groups(nchannels):
return np.array(np.random.randint(size=nchannels, low=0, high=4),
dtype=np.int32)
def create_channel_group_colors(nchannelgroups):
return np.mod(np.arange(nchannelgroups, dtype=np.int32), COLORS_COUNT) + 1
def create_masks(nspikes, nchannels, fetdim):
return np.clip(rnd.rand(nspikes, nchannels * fetdim + 1) * 1.5, 0, 1)
def create_similarity_matrix(nclusters):
return np.random.rand(nclusters, nclusters)
def create_correlograms(clusters, ncorrbins):
n = len(clusters)
shape = (n, n, ncorrbins)
# data = np.clip(np.random.rand(*shape), .75, 1)
data = np.random.rand(*shape)
data[0, 0] /= 10
data[1, 1] *= 10
# return IndexedMatrix(clusters, shape=shape,
# data=data)
return data
def create_baselines(clusters):
baselines = np.clip(np.random.rand(len(clusters), len(clusters)), .75, 1)
baselines[0, 0] /= 10
baselines[1, 1] *= 10
return baselines
def create_xml(nchannels, nsamples, fetdim):
channels = '\n'.join(["<channel>{0:d}</channel>".format(i)
for i in xrange(nchannels)])
xml = """
<parameters>
<acquisitionSystem>
<nBits>16</nBits>
<nChannels>{0:d}</nChannels>
<samplingRate>20000</samplingRate>
<voltageRange>20</voltageRange>
<amplification>1000</amplification>
<offset>2048</offset>
</acquisitionSystem>
<anatomicalDescription>
<channelGroups>
<group>
{2:s}
</group>
</channelGroups>
</anatomicalDescription>
<spikeDetection>
<channelGroups>
<group>
<channels>
{2:s}
</channels>
<nSamples>{1:d}</nSamples>
<peakSampleIndex>10</peakSampleIndex>
<nFeatures>{3:d}</nFeatures>
</group>
<group>
<channels>
{2:s}
</channels>
<nSamples>{1:d}</nSamples>
<peakSampleIndex>10</peakSampleIndex>
<nFeatures>{3:d}</nFeatures>
</group>
</channelGroups>
</spikeDetection>
</parameters>
""".format(nchannels, nsamples, channels, fetdim)
return xml
def create_probe(nchannels):
# return np.random.randint(size=(nchannels, 2), low=0, high=10)
geometry = np.zeros((nchannels, 2), dtype=np.int32)
geometry[:, 0] = np.arange(nchannels)
geometry[::2, 0] *= -1
geometry[:, 1] = np.arange(nchannels)
graph = [(i, (i + 1) % nchannels) for i in xrange(nchannels)]
probe = {'probes': {1: graph},
'geometry': {i: tuple(geometry[i, :]) for i in xrange(nchannels)}}
probe_python = "probes = {0:s}\ngeometry = {{1: {1:s}}}\n".format(
str(probe['probes']),
str(probe['geometry']),
)
return probe_python
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
def setup():
# Create mock directory if needed.
dir = TEST_FOLDER
# Create mock data.
waveforms = create_waveforms(nspikes, nsamples, nchannels)
features = create_features(nspikes, nchannels, fetdim, duration, freq)
clusters = create_clusters(nspikes, nclusters)
cluster_colors = create_cluster_colors(nclusters)
cluster_groups = create_cluster_groups(nclusters)
cluster_info = pd.DataFrame(
{'color': cluster_colors,
'group': cluster_groups},
dtype=np.int32,
index=np.unique(clusters))
group_colors = create_group_colors(ngroups)
group_names = create_group_names(ngroups)
group_info = pd.DataFrame(
{'color': group_colors,
'name': group_names},
index=np.arange(ngroups))
masks = create_masks(nspikes, nchannels, fetdim)
xml = create_xml(nchannels, nsamples, fetdim)
probe = create_probe(nchannels)
# Create mock files.
save_binary(os.path.join(dir, 'test.spk.1'), waveforms)
save_text(os.path.join(dir, 'test.fet.1'), features,
header=nchannels * fetdim + 1)
save_text(os.path.join(dir, 'test.aclu.1'), clusters, header=nclusters)
save_text(os.path.join(dir, 'test.clu.1'), clusters, header=nclusters)
save_text(os.path.join(dir, 'test.fmask.1'), masks, header=nclusters,
fmt='%.6f')
save_text(os.path.join(dir, 'test.xml'), xml)
save_text(os.path.join(dir, 'test.probe'), probe)
def teardown():
# log.debug("Erasing mock data for dataio subpackage.")
# Erase the temporary data directory.
dir = TEST_FOLDER
# if os.path.exists(dir):
# shutil.rmtree(dir, ignore_errors=True)
# Erase the contents instead, otherwise run into Access denied errors
# when trying to re-create the directory right after it has been deleted.
for the_file in os.listdir(dir):
file_path = os.path.join(dir, the_file)
try:
os.unlink(file_path)
except:
pass
| gpl-3.0 |
zhushun0008/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticModelFrame.py | 22 | 3298 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.2
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(first, last)/float(fs), x[first:last])
plt.axis([first/float(fs), last/float(fs), min(x[first:last]), max(x[first:last])])
plt.title('x (ocean.wav)')
plt.subplot(4,1,2)
plt.plot(float(fs)*np.arange(mX.size)/N, mX, 'r', lw=1.5, label="mX")
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'k', lw=1.5, label="mY")
plt.legend()
plt.axis([0, maxFreq, -80, max(mX)+3])
plt.title('mX + mY (stochastic approximation)')
plt.subplot(4,1,3)
plt.plot(float(fs)*np.arange(pX.size)/N, pX, 'c', lw=1.5, label="pX")
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'k', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.legend()
plt.title('pX + pY (random phases)')
plt.subplot(4,1,4)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('stochasticModelFrame.png')
plt.show()
| agpl-3.0 |
schets/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 16 | 22326 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
ogrisel/scipy | scipy/stats/morestats.py | 1 | 70237 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel, angle)
from numpy.testing.decorators import setastest
from scipy.lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability `alpha`.
Notes
-----
    Each tuple of mean, variance, and standard deviation estimates represents
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
`alpha`.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
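# Usage sketch for `bayes_mvs` (illustrative only; the sample values below are
# made up).  Each returned entry is a ``(center, (lower, upper))`` tuple:
# >>> from scipy import stats
# >>> data = [9.0, 9.5, 10.2, 8.7, 9.9, 10.4, 9.1]
# >>> mean_cntr, var_cntr, std_cntr = stats.bayes_mvs(data, alpha=0.9)
# >>> mean_center, (mean_lo, mean_hi) = mean_cntr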
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
The return values from bayes_mvs(data) is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy.stats import mvsdist
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if (n < 2):
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if (n > 1000): # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C)
else:
nm1 = n-1
fac = n*C/2.
val = nm1/2.
mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1))
sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac))
vdist = distributions.invgamma(val,scale=fac)
return mdist, vdist, sdist
def kstat(data,n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
        ln phi(t) = sum_{n=0}^{inf} kappa_n (it)^n / n!
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n+1,'d')
data = ravel(data)
N = len(data)
for k in range(1,n+1):
S[k] = sum(data**k,axis=0)
if n == 1:
return S[1]*1.0/N
elif n == 2:
return (N*S[2]-S[1]**2.0)/(N*(N-1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0))
elif n == 4:
return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \
(N*(N-1.0)*(N-2.0)*(N-3.0))
else:
raise ValueError("Should not be here.")
def kstatvar(data,n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data,n=2)*1.0/N
elif n == 2:
k2 = kstat(data,n=2)
k4 = kstat(data,n=4)
return (2*k2*k2*N + (N-1)*k4)/(N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
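# Usage sketch for `kstat`/`kstatvar` (illustrative only; the data are made up).
# For approximately normal data the second k-statistic estimates the variance:
# >>> import numpy as np
# >>> from scipy import stats
# >>> data = np.random.randn(1000)
# >>> k2 = stats.kstat(data, n=2)         # unbiased estimate of the variance
# >>> k2_var = stats.kstatvar(data, n=2)  # variance of that estimate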
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
0.5**(1/n), for i = n
val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1
1 - 0.5**(1/n), for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
...                    size=(nsample//2, 2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Probability Plot')
plot.set_xlabel('Quantiles')
plot.set_ylabel('Ordered Values')
else:
# matplotlib.pyplot module
plot.title('Probability Plot')
plot.xlabel('Quantiles')
plot.ylabel('Ordered Values')
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
correlation coefficient for the given data to a one-parameter
family of distributions.
See also ppcc_plot
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1-r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80):
"""Returns (shape, ppcc), and optionally plots shape vs. ppcc
(probability plot correlation coefficient) as a function of shape
parameter for a one-parameter family of distributions from shape
value a to b.
See also ppcc_max
"""
svals = r_[a:b:complex(N)]
ppcc = svals*0.0
k = 0
for sval in svals:
r1,r2 = probplot(x,sval,dist=dist,fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
plot.plot(svals, ppcc, 'x')
plot.title('(%s) PPCC Plot' % dist)
        plot.xlabel('Shape Values')
        plot.ylabel('Prob Plot Corr. Coef.')
return svals, ppcc
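# Usage sketch for `ppcc_max`/`ppcc_plot` (illustrative only; the data are made
# up).  `ppcc_max` finds the Tukey-lambda shape parameter that best matches the
# data; `ppcc_plot` returns the full correlation curve over a shape range:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.random.randn(200)
# >>> best_shape = stats.ppcc_max(x)          # default dist='tukeylambda'
# >>> svals, ppcc = stats.ppcc_plot(x, -1, 1)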
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=np.float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and `alpha` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given `alpha`.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
        y = (x**lmbda - 1) / lmbda,  for lmbda != 0
log(x), for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when `alpha` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=np.float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if not method in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlelation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Box-Cox Normality Plot')
plot.set_ylabel('Prob Plot Corr. Coef.')
plot.set_xlabel('$\lambda$')
else:
# matplotlib.pyplot module
plot.title('Box-Cox Normality Plot')
plot.ylabel('Prob Plot Corr. Coef.')
plot.xlabel('$\lambda$')
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N,'f')
init = 0
else:
if len(a) != N//2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if not ifault in [0,2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
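# Usage sketch for `shapiro` (illustrative only; the data are made up).  A small
# p-value suggests the sample is unlikely to come from a normal distribution:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.random.randn(50)
# >>> W, p = stats.shapiro(x)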
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
def anderson(x,dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
    normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if not dist in ['norm','expon','gumbel','extreme1','logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y-xbar)/s
z = distributions.norm.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_expon / (1.0 + 0.6/N),3)
elif dist == 'logistic':
def rootfunc(ab,xj,N):
a,b = ab
tmp = (xj-a)/b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2),axis=0)-0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N]
return array(val)
sol0 = array([xbar,np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5)
w = (y-sol[0])/sol[1]
z = distributions.logistic.cdf(w)
sig = array([25,10,5,2.5,1,0.5])
critical = around(_Avals_logistic / (1.0+0.25/N),3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
# the following is incorrect, see ticket:1097
## def fixedsolve(th,xj,N):
## val = stats.sum(xj)*1.0/N
## tmp = exp(-xj/th)
## term = sum(xj*tmp,axis=0)
## term /= sum(tmp,axis=0)
## return val - term
## s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5)
## xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N)
xbar, s = distributions.gumbel_l.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_l.cdf(w)
sig = array([25,10,5,2.5,1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3)
i = arange(1,N+1)
S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0)
A2 = -N-S
return A2, critical, sig
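# Usage sketch for `anderson` (illustrative only; the data are made up).
# Compare the statistic with the critical value at the chosen significance
# level:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.random.randn(100)
# >>> A2, critical, sig = stats.anderson(x, dist='norm')
# >>> reject_at_5pct = A2 > critical[2]   # sig[2] is the 5% level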
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(np.float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
(Bj * (N - Bj) - N * lj / 4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
p : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
h = (1. / arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return A2, critical, p
def ansari(x,y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
AB : float
The Ansari-Bradley test statistic
p-value : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x,y = asarray(x),asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m+n
xy = r_[x,y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank,N-rank+1)),0)
AB = sum(symrank[:n],axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and ((m < 55) or (n < 55)):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n,m)
ind = AB-astart
total = sum(a1,axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if (ind == cind):
pval = 2.0*sum(a1[:cind+1],axis=0)/total
else:
pval = 2.0*sum(a1[:cind],axis=0)/total
else:
find = int(floor(ind))
if (ind == floor(ind)):
pval = 2.0*sum(a1[find:],axis=0)/total
else:
pval = 2.0*sum(a1[find+1:],axis=0)/total
return AB, min(1.0,pval)
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n*(N+1.0)**2 / 4.0 / N
varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2)
else:
mnAB = n*(N+2.0)/4.0
varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2,axis=0)
if N % 2: # N odd
varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1))
else: # N even
varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1))
z = (AB - mnAB)/sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
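# Usage sketch for `ansari` (illustrative only; the two samples are made up):
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.random.randn(40)
# >>> y = np.random.randn(35) * 2.0       # wider scale than x
# >>> AB, p = stats.ansari(x, y)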
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
    `levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
T : float
The test statistic.
p-value : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k,'d')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni,axis=0)
spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k))
numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0)
denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k))
T = numer / denom
pval = distributions.chi2.sf(T,k-1) # 1 - cdf
return T, pval
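# Usage sketch for `bartlett` (illustrative only; the samples are made up).
# Any number of samples, of possibly different lengths, can be passed:
# >>> import numpy as np
# >>> from scipy import stats
# >>> a = np.random.randn(30)
# >>> b = np.random.randn(25) * 1.5
# >>> c = np.random.randn(40) * 0.5
# >>> T, p = stats.bartlett(a, b, c)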
def levene(*args,**kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k,'d')
if not center in ['mean','median','trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median' "
                         "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni,axis=0)
# compute Zij's
Zij = [None]*k
for i in range(k):
Zij[i] = abs(asarray(args[i])-Yci[i])
# compute Zbari
Zbari = zeros(k,'d')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i]*Ni[i]
Zbar /= Ntot
numer = (Ntot-k)*sum(Ni*(Zbari-Zbar)**2,axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i]-Zbari[i])**2,axis=0)
denom = (k-1.0)*dvar
W = numer / denom
pval = distributions.f.sf(W,k-1,Ntot-k) # 1 - cdf
return W, pval
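# Usage sketch for `levene` (illustrative only; the samples are made up), using
# the median-centered variant, which is also the default:
# >>> import numpy as np
# >>> from scipy import stats
# >>> a = np.random.randn(30)
# >>> b = np.random.randn(25) * 1.5
# >>> W, p = stats.levene(a, b, center='median')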
@setastest(False)
def binom_test(x,n=None,p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1]+x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x,n,p)
rerr = 1+1e-7
if (x == p*n):
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif (x < p*n):
i = np.arange(np.ceil(p*n),n+1)
y = np.sum(distributions.binom.pmf(i,n,p) <= d*rerr,axis=0)
pval = distributions.binom.cdf(x,n,p) + distributions.binom.sf(n-y,n,p)
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i,n,p) <= d*rerr,axis=0)
pval = distributions.binom.cdf(y-1,n,p) + distributions.binom.sf(x-1,n,p)
return min(1.0,pval)
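# Usage sketch for `binom_test` (illustrative only): 13 successes in 20 trials
# under a hypothesized success probability of 0.5:
# >>> from scipy import stats
# >>> p = stats.binom_test(13, n=20, p=0.5)
# >>> p_same = stats.binom_test([13, 7])    # successes/failures form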
def _apply_func(x,g,func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0,g,len(x)])
output = []
for k in range(len(g)-1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args,**kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
arrays of sample data. Need not be the same length
center : {'mean', 'median', 'trimmed'}, optional
keyword argument controlling which function of the data
is used in computing the test statistic. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
the test statistic
p-value : float
the p-value for the hypothesis test
Notes
-----
As with Levene's test there are three variants
of Fligner's test that differ by the measure of central
tendency used in the test. See `levene` for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if not center in ['mean','median','trimmed']:
        raise ValueError("Keyword argument <center> must be 'mean', 'median' "
                         "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni,axis=0)
# compute Zij's
Zij = [abs(asarray(args[i])-Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks/(2*(Ntot+1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a,g,sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a,axis=0, ddof=1)
Xsq = sum(Ni*(asarray(Aibar)-anbar)**2.0,axis=0)/varsq
pval = distributions.chi2.sf(Xsq,k-1) # 1 - cdf
return Xsq, pval
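# Usage sketch for `fligner` (illustrative only; the samples are made up),
# analogous to `levene` but rank-based:
# >>> import numpy as np
# >>> from scipy import stats
# >>> a = np.random.randn(30)
# >>> b = np.random.randn(25) * 2.0
# >>> Xsq, p = stats.fligner(a, b)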
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
    axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
        returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
T : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
p-value : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if not zero_method in ["wilcox", "pratt", "zsplit"]:
        raise ValueError("Zero method should be either 'wilcox' or 'pratt' "
                         "or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x-y
if zero_method == "wilcox":
d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences
count = len(d)
if (count < 10):
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count*(count + 1.) * 0.25
se = count*(count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return T, prob
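# Usage sketch for `wilcoxon` (illustrative only; the paired measurements are
# made up and kept above the recommended n > 20 for the normal approximation):
# >>> import numpy as np
# >>> from scipy import stats
# >>> before = np.random.randn(25) + 10.0
# >>> after = before + np.random.randn(25) * 0.5 + 0.3
# >>> T, p = stats.wilcoxon(before, after)
# >>> T_diff, p_diff = stats.wilcoxon(before - after)   # differences directly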
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
def pdf_fromgamma(g1,g2,g3=0.0,g4=None):
if g4 is None:
g4 = 3*g2*g2
sigsq = 1.0/g2
sig = sqrt(sigsq)
mu = g1*sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] = p12[k]/sig**k
# Add all of the terms to polynomial
totp = p12[0] - (g1/6.0*p12[3]) + \
(g2/24.0*p12[4] + g1*g1/72.0*p12[6]) - \
(g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \
(g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] +
g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi)/sig
def thefunc(x):
xn = (x-mu)/sig
return totp(xn)*exp(-xn*xn/2.0)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high-low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j*ang), axis=axis))
mask = res < 0
if (mask.ndim > 0):
res[mask] += 2*pi
elif mask:
res = res + 2*pi
return res*(high-low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi) * sqrt(-2*log(R))
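# Usage sketch for the circular statistics helpers (illustrative only; angles
# are made up), using degrees so ``high=360``:
# >>> from scipy import stats
# >>> angles = [355, 5, 2, 359, 10]
# >>> m = stats.circmean(angles, high=360, low=0)   # close to 2, not ~146
# >>> v = stats.circvar(angles, high=360, low=0)
# >>> s = stats.circstd(angles, high=360, low=0)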
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
kpj/SDEMotif | figures.py | 1 | 3875 | """
Produce some nice figures
"""
from itertools import cycle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
from setup import generate_basic_system
from plotter import plot_system, plot_system_evolution, plot_corr_mat
from nm_data_generator import add_node_to_system
from main import analyze_system
from solver import solve_system
from filters import filter_steady_state
from utils import compute_correlation_matrix
def detailed_system():
syst = generate_basic_system()
plt.figure()
plot_system(syst, plt.gca())
plt.savefig('presentation/images/FFL.pdf', dpi=300)
def plot_series(syst, ax, uosd=True):
syst, mat, sol = analyze_system(
syst, use_ode_sde_diff=uosd,
repetition_num=5, tmax=10)
plot_system_evolution(sol[:50], ax, show_legend=False)
ax.set_xticks([], [])
ax.set_yticks([], [])
ax.set_xlabel('')
ax.set_ylabel('')
def plot_mat(syst, ax):
syst, mat, sol = analyze_system(
syst, use_ode_sde_diff=True,
repetition_num=5, tmax=10)
plot_corr_mat(mat, ax)
ax.set_xticks([], [])
ax.set_yticks([], [])
def plot_hist(syst, ax):
single_run_matrices = []
for _ in range(50):
sol = solve_system(syst)
sol_extract = sol.T[int(len(sol.T)*3/4):]
if filter_steady_state(sol_extract):
continue
single_run_mat = compute_correlation_matrix(np.array([sol_extract]))
if single_run_mat.shape == (4, 4):
single_run_mat = single_run_mat[:-1,:-1]
assert single_run_mat.shape == (3, 3)
single_run_matrices.append(single_run_mat)
single_run_matrices = np.asarray(single_run_matrices)
# plotting
cols = cycle(['b', 'r', 'g', 'c', 'm', 'y', 'k'])
for i, row in enumerate(single_run_matrices.T):
for j, series in enumerate(row):
if i == j: break
sns.distplot(series, ax=ax, label=r'$c_{{{},{}}}$'.format(i,j))
ax.set_xlim((-1,1))
ax.set_xticks([], [])
ax.set_yticks([], [])
def configuration_overview(func, fname, draw_all=True):
fig = plt.figure()
gs = gridspec.GridSpec(3, 5, width_ratios=[1,.2,1,1,1])
for i, conf in enumerate([(1,1), (4,2), (2,1)]):
syst = generate_basic_system(*conf)
func(syst, plt.subplot(gs[i, 0]))
if draw_all:
for j, m in enumerate(add_node_to_system(syst)[3:6]):
func(m, plt.subplot(gs[i, j+2]))
if draw_all:
fig.text(0.5, 0.04, 'varied embedding', ha='center', fontsize=20)
fig.text(0.085, 0.5, 'varied parameters', va='center', rotation='vertical', fontsize=20)
plt.savefig('presentation/images/overview_{}.pdf'.format(fname))
def distribution_filter_threshold():
plt.figure()
sns.distplot(
np.random.normal(.1, .1, size=1000),
label='before embedding', hist_kws=dict(alpha=.2))
sns.distplot(
np.random.normal(0, .1, size=1000),
label='after embedding', hist_kws=dict(alpha=.2))
thres = .1
plt.axvspan(-thres, thres, facecolor='r', alpha=0.1, label='threshold')
plt.xlim((-1,1))
plt.xlabel('correlation')
plt.legend(loc='best')
plt.savefig('presentation/images/dist_thres.pdf')
def main():
sns.set_style('white')
plt.style.use('seaborn-poster')
detailed_system()
configuration_overview(
lambda s,a: plot_system(s, a, netx_plot=True),
'sub', draw_all=False)
configuration_overview(
lambda s,a: plot_system(s, a, netx_plot=True), 'all')
configuration_overview(
lambda s,a: plot_series(s,a,uosd=False), 'series_orig')
configuration_overview(plot_series, 'series')
configuration_overview(plot_mat, 'mat')
configuration_overview(plot_hist, 'hist')
distribution_filter_threshold()
if __name__ == '__main__':
main()
| mit |
grhawk/IChemiton | dot.ipython/profile_ichemiton/ipython_config.py | 1 | 19779 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = u'/usr/bin/nano'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.3 (default, Mar 13 2014, 11:03:55) \nType "copyright", "credits" or "license" for more information.\n\nIPython 2.3.1 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| gpl-2.0 |
JohnDMcMaster/uvscada | camac/2228a_calplt.py | 1 | 2764 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import argparse
import csv
import os
import sys
def hms(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Use LeCroy 2228A test mode to collect calibration data')
parser.add_argument('--force', action='store_true', help="Overwrite files if they already exist")
parser.add_argument('fn', help='File name in')
parser.add_argument('dir', nargs='?', default=None, help='Output dir')
args = parser.parse_args()
data = []
f = open(args.fn)
if args.dir is None:
args.dir = os.path.splitext(args.fn)[0]
print 'Saving to %s' % (args.dir,)
if os.path.exists(args.dir):
if not args.force:
raise Exception("Refusing to overwrite existing data")
else:
os.mkdir(args.dir)
tm = 9.6
hdr = f.readline()
csvr = csv.reader(f, delimiter=',')
ts = []
vs = []
off = None
# old without t
# slot,iter,ch0,ch1,ch2,ch3,ch4,ch5,ch6,ch7
    if len(hdr.split(',')) == 10:
for row in csvr:
ts.append(int(row[1]) * tm)
v = [int(v) for v in row[2:]]
if off is None:
off = v
vs.append([vi - offi for vi, offi in zip(v, off)])
    # new format with t
    # slot,iter,t,ch0,ch1,ch2,ch3,ch4,ch5,ch6,ch7
else:
csvr = csv.reader(f, delimiter=',')
ts = []
vs = []
off = None
for row in csvr:
ts.append(float(row[2]))
v = [int(v) for v in row[3:]]
if off is None:
off = v
vs.append([vi - offi for vi, offi in zip(v, off)])
total_t = ts[-1] - ts[0]
vs = zip(*vs)
# Plot everything together
if 1:
        colors = 'rgbybmcr'
pargs = []
for i in xrange(8):
pargs.extend([ts, vs[i], colors[i]])
plt.plot(*pargs)
#plt.semilogy([small for (t, small, large) in data])
#plt.plotting(semilogy)
plt.title('All drift over %s' % hms(total_t))
plt.xlabel('Sample #')
plt.ylabel('ADC delta')
#plt.show()
plt.savefig(os.path.join(args.dir, 'all.png'))
# Individual
if 1:
for i in xrange(8):
plt.clf()
plt.plot(ts, vs[i], 'b-')
plt.title('CH%d over %s' % (i, hms(total_t)))
plt.xlabel('Sample #')
plt.ylabel('ADC delta')
#plt.show()
plt.savefig(os.path.join(args.dir, 'ch%d.png' % i))
| bsd-2-clause |
alexsavio/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
    'AXP': 'American Express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
juliusbierk/scikit-image | doc/examples/plot_local_binary_pattern.py | 17 | 6774 | """
===============================================
Local Binary Pattern for texture classification
===============================================
In this example, we will see how to classify textures based on LBP (Local
Binary Pattern). LBP looks at points surrounding a central point and tests
whether the surrounding points are greater than or less than the central point
(i.e. gives a binary result).
Before trying out LBP on an image, it helps to look at a schematic of LBPs.
The below code is just used to plot the schematic.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
METHOD = 'uniform'
plt.rcParams['font.size'] = 9
def plot_circle(ax, center, radius, color):
circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
ax.add_patch(circle)
def plot_lbp_model(ax, binary_values):
"""Draw the schematic for a local binary pattern."""
# Geometry spec
theta = np.deg2rad(45)
R = 1
r = 0.15
w = 1.5
gray = '0.5'
# Draw the central pixel.
plot_circle(ax, (0, 0), radius=r, color=gray)
# Draw the surrounding pixels.
for i, facecolor in enumerate(binary_values):
x = R * np.cos(i * theta)
y = R * np.sin(i * theta)
plot_circle(ax, (x, y), radius=r, color=str(facecolor))
# Draw the pixel grid.
for x in np.linspace(-w, w, 4):
ax.axvline(x, color=gray)
ax.axhline(x, color=gray)
# Tweak the layout.
ax.axis('image')
ax.axis('off')
size = w + 0.2
ax.set_xlim(-size, size)
ax.set_ylim(-size, size)
fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
binary_patterns = [np.zeros(8),
np.ones(8),
np.hstack([np.ones(4), np.zeros(4)]),
np.hstack([np.zeros(3), np.ones(5)]),
[1, 0, 0, 1, 1, 1, 0, 0]]
for ax, values, name in zip(axes, binary_patterns, titles):
plot_lbp_model(ax, values)
ax.set_title(name)
"""
.. image:: PLOT2RST.current_figure
The figure above shows example results with black (or white) representing
pixels that are less (or more) intense than the central pixel. When surrounding
pixels are all black or all white, then that image region is flat (i.e.
featureless). Groups of continuous black or white pixels are considered
"uniform" patterns that can be interpreted as corners or edges. If pixels
switch back-and-forth between black and white pixels, the pattern is considered
"non-uniform".
When using LBP to detect texture, you measure a collection of LBPs over an
image patch and look at the distribution of these LBPs. Let's apply LBP to
a brick texture.
"""
from skimage.transform import rotate
from skimage.feature import local_binary_pattern
from skimage import data
from skimage.color import label2rgb
# settings for LBP
radius = 3
n_points = 8 * radius
def overlay_labels(image, lbp, labels):
mask = np.logical_or.reduce([lbp == each for each in labels])
return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
def highlight_bars(bars, indexes):
for i in indexes:
bars[i].set_facecolor('r')
image = data.load('brick.png')
lbp = local_binary_pattern(image, n_points, radius, METHOD)
def hist(ax, lbp):
n_bins = lbp.max() + 1
return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
facecolor='0.5')
# plot histograms of LBP of textures
fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
plt.gray()
titles = ('edge', 'flat', 'corner')
w = width = radius - 1
edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
i_14 = n_points // 4 # 1/4th of the histogram
i_34 = 3 * (n_points // 4) # 3/4th of the histogram
corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
list(range(i_34 - w, i_34 + w + 1)))
label_sets = (edge_labels, flat_labels, corner_labels)
for ax, labels in zip(ax_img, label_sets):
ax.imshow(overlay_labels(image, lbp, labels))
for ax, labels, name in zip(ax_hist, label_sets, titles):
counts, _, bars = hist(ax, lbp)
highlight_bars(bars, labels)
ax.set_ylim(ymax=np.max(counts[:-1]))
ax.set_xlim(xmax=n_points + 2)
ax.set_title(name)
ax_hist[0].set_ylabel('Percentage')
for ax in ax_img:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The above plot highlights flat, edge-like, and corner-like regions of the
image.
The histogram of the LBP result is a good measure to classify textures. Here,
we test the histogram distributions against each other using the
Kullback-Leibler-Divergence.
"""
# settings for LBP
radius = 2
n_points = 8 * radius
def kullback_leibler_divergence(p, q):
p = np.asarray(p)
q = np.asarray(q)
filt = np.logical_and(p != 0, q != 0)
return np.sum(p[filt] * np.log2(p[filt] / q[filt]))
def match(refs, img):
best_score = 10
best_name = None
lbp = local_binary_pattern(img, n_points, radius, METHOD)
n_bins = lbp.max() + 1
hist, _ = np.histogram(lbp, normed=True, bins=n_bins, range=(0, n_bins))
for name, ref in refs.items():
ref_hist, _ = np.histogram(ref, normed=True, bins=n_bins,
range=(0, n_bins))
score = kullback_leibler_divergence(hist, ref_hist)
if score < best_score:
best_score = score
best_name = name
return best_name
brick = data.load('brick.png')
grass = data.load('grass.png')
wall = data.load('rough-wall.png')
refs = {
'brick': local_binary_pattern(brick, n_points, radius, METHOD),
'grass': local_binary_pattern(grass, n_points, radius, METHOD),
'wall': local_binary_pattern(wall, n_points, radius, METHOD)
}
# classify rotated textures
print('Rotated images matched against references using LBP:')
print('original: brick, rotated: 30deg, match result: ',
match(refs, rotate(brick, angle=30, resize=False)))
print('original: brick, rotated: 70deg, match result: ',
match(refs, rotate(brick, angle=70, resize=False)))
print('original: grass, rotated: 145deg, match result: ',
match(refs, rotate(grass, angle=145, resize=False)))
# plot histograms of LBP of textures
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
figsize=(9, 6))
plt.gray()
ax1.imshow(brick)
ax1.axis('off')
hist(ax4, refs['brick'])
ax4.set_ylabel('Percentage')
ax2.imshow(grass)
ax2.axis('off')
hist(ax5, refs['grass'])
ax5.set_xlabel('Uniform LBP values')
ax3.imshow(wall)
ax3.axis('off')
hist(ax6, refs['wall'])
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
NickC1/skedm | build/lib/skedm/utilities.py | 1 | 9845 | """
Metrics for scoring predictions and also some more specialized
math needed for skedm
"""
import numpy as np
from scipy import stats as stats
from numba import jit
from sklearn.metrics import cohen_kappa_score
def weighted_mean(X, distances ):
"""
Calculates the weighted mean given a set of values and their corresponding
distances. Only 1/distance is implemented. This essentially is just a
weighted mean down axis=1.
Parameters
----------
X : 2d array
Training values. shape(nsamples,number near neighbors)
distances : 2d array
Sorted distances to the near neighbors for the indices.
shape(nsamples,number near neighbors)
Returns
-------
w_mean : 2d array
Weighted predictions
"""
distances = distances+0.00001 #ensures no zeros when dividing
W = 1./distances
denom = np.sum(W, axis=1,keepdims=True)
W/=denom
w_mean = np.sum(X * W, axis=1)
return w_mean.ravel()
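# Illustrative sketch, not part of the original module: weighted_mean on a
# tiny hand-made neighbor matrix. The values and distances below are
# arbitrary demo numbers; call _example_weighted_mean() by hand.
def _example_weighted_mean():
    vals = np.array([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0]])      # shape (nsamples, n_neighbors)
    dists = np.array([[1.0, 2.0, 4.0],
                      [1.0, 1.0, 1.0]])     # sorted neighbor distances
    # Each row is averaged with weights proportional to 1/distance, so the
    # first row is pulled toward 1.0 and the second is the plain mean 5.0.
    return weighted_mean(vals, dists)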
def mi_digitize(X):
"""
Digitize a time series for mutual information analysis
Parameters
----------
X : 1D array
array to be digitized of length m
Returns
-------
Y : 1D array
digitized array of length m
"""
minX = np.min(X) - 1e-5 #subtract for correct binning
maxX = np.max(X) + 1e-5 #add for correct binning
nbins = int(np.sqrt(len(X)/20))
nbins = max(4,nbins) #make sure there are atleast four bins
bins = np.linspace(minX, maxX, nbins+1) #add one for correct num bins
Y = np.digitize(X, bins)
return Y
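# Illustrative sketch, not part of the original module: mi_digitize on a
# short sine series. For 100 samples the sqrt(N/20) rule gives 2 bins, so the
# minimum of 4 bins is used and the output takes integer values 1 through 4.
# Call _example_mi_digitize() by hand to see the binned series.
def _example_mi_digitize():
    series = np.sin(np.linspace(0, 4*np.pi, 100))
    return mi_digitize(series)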
def corrcoef(preds,actual):
"""
    Correlation coefficient between predicted values and actual values.
    Parameters
    ----------
    preds : array shape (num samples,)
        Predicted values.
    actual : array of shape (num samples,)
        Actual values from the testing set.
Returns
-------
cc : float
Returns the correlation coefficient
"""
cc = np.corrcoef(preds,actual)[1,0]
return cc
def classCompare(preds,actual):
"""
Percent correct between predicted values and actual values
Parameters
----------
preds : array shape (num samples,num targets)
test : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
    cc : float
        Fraction of predictions that exactly match the actual values
"""
cc = np.mean( preds == actual )
return cc
def classificationError(preds,actual):
"""
Percent correct between predicted values and actual values scaled
to the most common prediction of the space
Parameters
----------
preds : array shape (num samples,)
test : array of shape (num samples,)
actual values from the testing set
Returns
-------
    cc : float
        Percent correct scaled by the frequency of the most common class
"""
most_common,_=stats.mode(actual,axis=None)
num = np.mean(preds == actual)
denom = np.mean(actual == most_common)
cc = num/denom.astype('float')
return cc
def kleckas_tau(preds,actual):
"""
Calculates kleckas tau
Parameters
----------
preds : array shape (num samples,)
test : array of shape (num samples,)
actual values from the testing set
Returns
-------
    tau : float
        Kleckas tau statistic
"""
ncorr = np.sum(preds == actual) #number correctly classified
cats_unique = np.unique(actual)
sum_t = 0
for cat in cats_unique:
ni = np.sum(cat==actual)
pi = float(ni)/len(preds)
sum_t += ni*pi
tau = (ncorr - sum_t) / (len(preds) - sum_t)
return tau
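# Added worked example: with preds = [1, 1, 2, 2] and actual = [1, 2, 2, 2], three
# samples are correct (ncorr = 3) and the chance-expected count is
# sum_t = 1*0.25 + 3*0.75 = 2.5, giving tau = (3 - 2.5) / (4 - 2.5) = 1/3.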
def cohens_kappa(preds,actual):
"""
Calculates cohens kappa
Parameters
----------
preds : array shape (num samples,)
test : array of shape (num samples,)
actual values from the testing set
Returns
-------
    c : float
        Cohen's kappa statistic
"""
    from sklearn.metrics import cohen_kappa_score  # not imported at module level; assumes scikit-learn is installed
    c = cohen_kappa_score(preds, actual)
return c
def klekas_tau_spatial(X,max_lag,percent_calc=.5):
"""
Similar to mutual_information_spatial, it calculates the kleckas tau value
between a shifted and unshifted slice of the space. It makes slices in both
the rows and the columns.
Parameters
----------
X : 2-D array
input two-dimensional image
max_lag : integer
maximum amount to shift the space
percent_calc : float
How many rows and columns to use average over. Using the whole space
is overkill.
Returns
-------
    r_mut : 1-D array
        the kleckas tau averaged down the rows (vertical)
    c_mut : 1-D array
        the kleckas tau averaged across the columns (horizontal)
    r_mi : 2-D array
        the kleckas tau down each row (vertical)
    c_mi : 2-D array
        the kleckas tau across each column (horizontal)
"""
rs, cs = np.shape(X)
rs_iters = int(rs*percent_calc)
cs_iters = int(cs*percent_calc)
r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)
c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)
# The r_picks are used to calculate the MI in the columns
# and the c_picks are used to calculate the MI in the rows
c_mi = np.zeros((max_lag,rs_iters))
r_mi = np.zeros((max_lag,cs_iters))
for ii in range(rs_iters):
m_slice = X[r_picks[ii],:]
for j in range(max_lag):
shift = j+1
new_m = m_slice[:-shift]
shifted = m_slice[shift:]
c_mi[j,ii] = kleckas_tau(new_m,shifted)
for ii in range(cs_iters):
m_slice = X[:,c_picks[ii]]
for j in range(max_lag):
shift = j+1
new_m = m_slice[:-shift]
shifted = m_slice[shift:]
r_mi[j,ii] = kleckas_tau(new_m,shifted)
r_mut = np.mean(r_mi,axis=1)
c_mut = np.mean(c_mi,axis=1)
return r_mut, c_mut, r_mi, c_mi
def varianceExplained(preds,actual):
"""
    Variance of the prediction residuals relative to the variance of the
    actual values.
Parameters
----------
preds : array shape (num samples,num targets)
actual : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
    cc : float
        Ratio of the residual variance to the variance of the actual values
        (this is 1 minus the usual explained variance score)
"""
cc = np.var(preds - actual) / np.var(actual)
return cc
def score(preds,actual):
"""
    The coefficient R^2 is defined as (1 - u/v), where u is the residual
    sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
    sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible
score is 1.0, lower values are worse.
Parameters
----------
preds : array shape (num samples,num targets)
test : array of shape (num samples, num targets)
actual values from the testing set
Returns
-------
    r2 : float
        The coefficient of determination R^2 (0 if the targets are constant)
"""
u = np.square(actual - preds ).sum()
v = np.square(actual - actual.mean()).sum()
    if v == 0.:
        print('Targets are all the same. Returning 0.')
        return 0
    r2 = 1 - u/v
    return r2
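# Added worked example: preds = [2.5, 0.0, 2, 8] against actual = [3, -0.5, 2, 7]
# gives u = 1.5 and v = 29.1875, so r2 = 1 - 1.5/29.1875, about 0.949.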
def weighted_mode(a, w, axis=0):
"""This function is borrowed from sci-kit learn's extmath.py
Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
print('both weights')
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
@jit
def quick_mode_axis1(X):
"""
Takes the mode of an array across the columns. aka axis=1
X : np.array
"""
X = X.astype(int)
len_x = len(X)
mode = np.zeros(len_x)
for i in range(len_x):
mode[i] = np.bincount(X[i,:]).argmax()
return mode
@jit
def quick_mode_axis1_keep_nearest_neigh(X):
"""
The current implementation of the mode takes the lowest value instead of
the closest value. For example if the neighbors have values:
[7,7,2,3,4,1,1] the current implementation will keep 1 as the value. For
our purposes, the ordering is important, so we want to keep the first value.
"""
X = X.astype(int)
len_x = len(X)
mode = np.zeros(len_x)
for i in range(len_x):
loc = np.bincount(X[i,:])[X[i,:]].argmax() #reorder before argmax
mode[i] = X[i,:][loc]
return mode
def keep_diversity(X,thresh=1.):
"""
Throws out rows of only one class.
X : 2d array of ints
Returns
keep : 1d boolean
ex:
[1 1 1 1]
[2 1 2 3]
[2 2 2 2]
[3 2 1 4]
returns:
[F]
[T]
[F]
[T]
"""
X = X.astype(int)
mode = quick_mode_axis1(X).reshape(-1,1)
compare = np.repeat(mode,X.shape[1],axis=1)
thresh = int(thresh*X.shape[1])
    keep = np.sum(compare == X, axis=1) < thresh  # drop rows where the modal class fills at least `thresh` entries
return keep
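# Added quick check for keep_diversity: in the 4x4 example from the docstring the
# rows [1 1 1 1] and [2 2 2 2] consist of a single class and are dropped, while the
# two mixed rows are kept, i.e. keep == [False, True, False, True].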
| mit |
juliusbierk/scikit-image | skimage/io/_plugins/matplotlib_plugin.py | 16 | 4961 | from collections import namedtuple
import numpy as np
import warnings
import matplotlib.pyplot as plt
from ...util import dtype as dtypes
from ...exposure import is_low_contrast
from ...util.colormap import viridis
_default_colormap = 'gray'
_nonstandard_colormap = viridis
_diverging_colormap = 'RdBu'
ImageProperties = namedtuple('ImageProperties',
['signed', 'out_of_range_float',
'low_dynamic_range', 'unsupported_dtype'])
def _get_image_properties(image):
"""Determine nonstandard properties of an input image.
Parameters
----------
image : array
The input image.
Returns
-------
ip : ImageProperties named tuple
The properties of the image:
- signed: whether the image has negative values.
- out_of_range_float: if the image has floating point data
outside of [-1, 1].
- low_dynamic_range: if the image is in the standard image
range (e.g. [0, 1] for a floating point image) but its
dynamic range would be too small to display with standard
image ranges.
- unsupported_dtype: if the image data type is not a
standard skimage type, e.g. ``numpy.uint64``.
"""
immin, immax = np.min(image), np.max(image)
imtype = image.dtype.type
try:
lo, hi = dtypes.dtype_range[imtype]
except KeyError:
lo, hi = immin, immax
signed = immin < 0
out_of_range_float = (np.issubdtype(image.dtype, np.float) and
(immin < lo or immax > hi))
low_dynamic_range = (immin != immax and
is_low_contrast(image))
unsupported_dtype = image.dtype not in dtypes._supported_types
return ImageProperties(signed, out_of_range_float,
low_dynamic_range, unsupported_dtype)
def _raise_warnings(image_properties):
"""Raise the appropriate warning for each nonstandard image type.
Parameters
----------
image_properties : ImageProperties named tuple
The properties of the considered image.
"""
ip = image_properties
if ip.unsupported_dtype:
warnings.warn("Non-standard image type; displaying image with "
"stretched contrast.")
if ip.low_dynamic_range:
warnings.warn("Low image dynamic range; displaying image with "
"stretched contrast.")
if ip.out_of_range_float:
warnings.warn("Float image out of standard range; displaying "
"image with stretched contrast.")
def _get_display_range(image):
"""Return the display range for a given set of image properties.
Parameters
----------
image : array
The input image.
Returns
-------
lo, hi : same type as immin, immax
The display range to be used for the input image.
cmap : string
The name of the colormap to use.
"""
ip = _get_image_properties(image)
immin, immax = np.min(image), np.max(image)
if ip.signed:
magnitude = max(abs(immin), abs(immax))
lo, hi = -magnitude, magnitude
cmap = _diverging_colormap
elif any(ip):
_raise_warnings(ip)
lo, hi = immin, immax
cmap = _nonstandard_colormap
else:
lo = 0
imtype = image.dtype.type
hi = dtypes.dtype_range[imtype][1]
cmap = _default_colormap
return lo, hi, cmap
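# Added example of the branches above (dtype/values assumed): an int8 image with
# values in [-100, 50] is treated as signed, so the display range becomes (-100, 100)
# with the diverging 'RdBu' colormap; a full-range uint8 image falls through to the
# default branch and is shown on (0, 255) with the 'gray' colormap.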
def imshow(im, *args, **kwargs):
"""Show the input image and return the current axes.
By default, the image is displayed in greyscale, rather than
the matplotlib default colormap.
Images are assumed to have standard range for their type. For
example, if a floating point image has values in [0, 0.5], the
most intense color will be gray50, not white.
If the image exceeds the standard range, or if the range is too
small to display, we fall back on displaying exactly the range of
the input image, along with a colorbar to clearly indicate that
this range transformation has occurred.
For signed images, we use a diverging colormap centered at 0.
Parameters
----------
im : array, shape (M, N[, 3])
The image to display.
*args, **kwargs : positional and keyword arguments
These are passed directly to `matplotlib.pyplot.imshow`.
Returns
-------
ax_im : `matplotlib.pyplot.AxesImage`
The `AxesImage` object returned by `plt.imshow`.
"""
if kwargs.get('cmap', None) == 'viridis':
kwargs['cmap'] = viridis
lo, hi, cmap = _get_display_range(im)
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', cmap)
kwargs.setdefault('vmin', lo)
kwargs.setdefault('vmax', hi)
ax_im = plt.imshow(im, *args, **kwargs)
if cmap != _default_colormap:
plt.colorbar()
return ax_im
imread = plt.imread
show = plt.show
def _app_show():
show()
| bsd-3-clause |
airbnb/superset | superset/examples/sf_population_polygons.py | 3 | 2118 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pandas as pd
from sqlalchemy import BigInteger, Float, Text
from superset import db
from superset.utils import core as utils
from .helpers import get_example_data, TBL
def load_sf_population_polygons(
only_metadata: bool = False, force: bool = False
) -> None:
tbl_name = "sf_population_polygons"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("sf_population.json.gz")
df = pd.read_json(data)
df["contour"] = df.contour.map(json.dumps)
df.to_sql(
tbl_name,
database.get_sqla_engine(),
if_exists="replace",
chunksize=500,
dtype={
"zipcode": BigInteger,
"population": BigInteger,
"contour": Text,
"area": Float,
},
index=False,
)
print("Creating table {} reference".format(tbl_name))
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "Population density of San Francisco"
tbl.database = database
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
| apache-2.0 |
ajrichards/bayesian-examples | estimation/two-component-gaussian-em.py | 2 | 6433 | #!/usr/bin/env python
"""
This is an implementation of two-component Gaussian example from
Elements of Statistical Learning (pp 272)
"""
## make imports
from __future__ import division
import sys
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import scipy.stats as stats
class TwoComponentGaussian():
def __init__(self, y, num_iters, num_runs,verbose=False):
self.y = y
self.verbose = verbose
self.max_like, self.best_est = self.run_em_algorithm(num_iters, num_runs)
### make defs for initial guessing, expectation, and maximization
def get_init_guesses(self,y):
        ## make initial guesses for the parameters (mu1, sig1, mu2, sig2 and pi)
n = len(self.y)
mu1 = y[np.random.randint(0,n)]
mu2 = y[np.random.randint(0,n)]
sig1 = np.random.uniform(0.5,1.5)
sig2 = np.random.uniform(0.5,1.5)
pi = 0.5
return {'n':n, 'mu1':mu1, 'mu2':mu2, 'sig1':sig1, 'sig2':sig2, 'pi':pi}
def perform_expectation(self, y, parms):
gamma_hat = np.zeros((parms['n']),'float')
for i in range(parms['n']):
phi_theta1 = stats.norm.pdf(y[i],loc=parms['mu1'],scale=np.sqrt(parms['sig1']))
phi_theta2 = stats.norm.pdf(y[i],loc=parms['mu2'],scale=np.sqrt(parms['sig2']))
numer = parms['pi'] * phi_theta2
denom = ((1.0 - parms['pi']) * phi_theta1) + (parms['pi'] * phi_theta2)
gamma_hat[i] = numer / denom
return gamma_hat
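    # Added note: the loop above computes the E-step responsibilities
    # gamma_i = pi * phi(y_i; mu2, sig2) / ((1 - pi) * phi(y_i; mu1, sig1) + pi * phi(y_i; mu2, sig2)),
    # i.e. the posterior probability that observation y_i belongs to the second component.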
def perform_maximization(self,y,parms,gamma_hat):
"""
maximization
"""
## use weighted maximum likelihood fits to get updated parameter estimates
numer_muhat1 = 0
denom_hat1 = 0
numer_sighat1 = 0
numer_muhat2 = 0
denom_hat2 = 0
numer_sighat2 = 0
pi_hat = 0
        ## get numerators and denominators for updating the parameter estimates
for i in range(parms['n']):
numer_muhat1 = numer_muhat1 + ((1.0 - gamma_hat[i]) * y[i])
numer_sighat1 = numer_sighat1 + ( (1.0 - gamma_hat[i]) * ( y[i] - parms['mu1'] )**2 )
denom_hat1 = denom_hat1 + (1.0 - gamma_hat[i])
numer_muhat2 = numer_muhat2 + (gamma_hat[i] * y[i])
numer_sighat2 = numer_sighat2 + (gamma_hat[i] * ( y[i] - parms['mu2'] )**2)
denom_hat2 = denom_hat2 + gamma_hat[i]
pi_hat = pi_hat + (gamma_hat[i] / parms['n'])
## calculate estimates
mu_hat1 = numer_muhat1 / denom_hat1
sig_hat1 = numer_sighat1 / denom_hat1
mu_hat2 = numer_muhat2 / denom_hat2
sig_hat2 = numer_sighat2 / denom_hat2
return {'mu1':mu_hat1, 'mu2':mu_hat2, 'sig1': sig_hat1, 'sig2':sig_hat2, 'pi':pi_hat, 'n':parms['n']}
def get_likelihood(self,y,parms,gamma_hat):
"""
likelihood
"""
part1 = 0
part2 = 0
for i in range(parms['n']):
phi_theta1 = stats.norm.pdf(y[i],loc=parms['mu1'],scale=np.sqrt(parms['sig1']))
phi_theta2 = stats.norm.pdf(y[i],loc=parms['mu2'],scale=np.sqrt(parms['sig2']))
part1 = part1 + ( (1.0 - gamma_hat[i]) * np.log(phi_theta1) + gamma_hat[i] * np.log(phi_theta2) )
part2 = part2 + ( (1.0 - gamma_hat[i]) * np.log(parms['pi']) + gamma_hat[i] * np.log(1.0 - parms['pi']) )
return part1 + part2
def run_em_algorithm(self, num_iters, num_runs, verbose = True):
"""
main algorithm functions
"""
max_like = -np.inf
best_estimates = None
for j in range(num_runs):
iter_count = 0
parms = self.get_init_guesses(self.y)
## iterate between E-step and M-step
while iter_count < num_iters:
iter_count += 1
## ensure we have reasonable estimates
if parms['sig1'] < 0.0 or parms['sig2'] < 0.0:
iter_count = 1
                    parms = self.get_init_guesses(self.y)
## E-step
gamma_hat = self.perform_expectation(self.y,parms)
log_like = self.get_likelihood(self.y,parms,gamma_hat)
## M-step
parms = self.perform_maximization(self.y,parms,gamma_hat)
if log_like > max_like:
max_like = log_like
best_estimates = parms.copy()
if self.verbose == True:
print('run:',j+1, '--- mu1: ',round(parms['mu1'],2),'--- mu2:',round(parms['mu2'],2),)
print('--- obs.data likelihood: ', round(log_like,4))
print("runs complete")
return max_like, best_estimates
if __name__ == '__main__':
y1 = np.array([-0.39,0.12,0.94,1.67,1.76,2.44,3.72,4.28,4.92,5.53])
y2 = np.array([ 0.06,0.48,1.01,1.68,1.80,3.25,4.12,4.60,5.28,6.22])
y = np.hstack((y1,y2))
num_iters = 25
num_runs = 20
verbose = True
make_plots = True
tcg = TwoComponentGaussian(y, num_iters, num_runs,verbose=verbose)
print('max likelihood', tcg.max_like)
print('best estimates', tcg.best_est)
if make_plots:
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(y,bins=20,facecolor="#9999FF",alpha=0.7,normed=1,histtype='stepfilled')
#n, bins, patches = plt.hist(y,15,normed=1,facecolor='gray',alpha=0.75)
## add a 'best fit' line (book results)
mu1 = 4.62
mu2 = 1.06
sig1 = 0.87
sig2 = 0.77
p1 = mlab.normpdf( bins, mu1, np.sqrt(sig1))
p2 = mlab.normpdf( bins, mu2, np.sqrt(sig2))
l1 = ax.plot(bins, p1, 'r--', linewidth=1)
l2 = ax.plot(bins, p2, 'r--', linewidth=1)
## add a 'best fit' line (results from here)
p3 = mlab.normpdf( bins, tcg.best_est['mu1'], np.sqrt(tcg.best_est['sig1']))
p4 = mlab.normpdf( bins, tcg.best_est['mu2'], np.sqrt(tcg.best_est['sig2']))
l3 = ax.plot(bins, p3, 'k-', linewidth=1)
l4 = ax.plot(bins, p4, 'k-', linewidth=1)
plt.xlabel('y')
plt.ylabel('freq')
plt.ylim([0,0.8])
plt.legend( (l1[0], l3[0]), ('Book Estimate', 'EM Estimate') )
plt.savefig('../TwoComponentGauss.png')
plt.show()
| bsd-3-clause |
nutils/nutils | examples/drivencavity.py | 1 | 6644 | #! /usr/bin/env python3
#
# In this script we solve the lid driven cavity problem for stationary Stokes
# and Navier-Stokes flow. That is, a unit square domain, with no-slip left,
# bottom and right boundaries and a top boundary that is moving at unit
# velocity in positive x-direction.
import nutils, numpy
# The main function defines the parameter space for the script. Configurable
# parameters are the mesh density (in number of elements along an edge),
# element type (square, triangle, or mixed), polynomial degree, and Reynolds
# number.
def main(nelems: 'number of elements' = 12,
etype: 'type of elements (square/triangle/mixed)' = 'square',
degree: 'polynomial degree for velocity' = 3,
reynolds: 'reynolds number' = 1000.):
domain, geom = nutils.mesh.unitsquare(nelems, etype)
ns = nutils.function.Namespace()
ns.Re = reynolds
ns.x = geom
ns.ubasis, ns.pbasis = nutils.function.chain([
domain.basis('std', degree=degree).vector(2),
domain.basis('std', degree=degree-1),
])
ns.u_i = 'ubasis_ni ?lhs_n'
ns.p = 'pbasis_n ?lhs_n'
ns.stress_ij = '(u_i,j + u_j,i) / Re - p δ_ij'
sqr = domain.boundary.integral('u_k u_k d:x' @ ns, degree=degree*2)
wallcons = nutils.solver.optimize('lhs', sqr, droptol=1e-15)
sqr = domain.boundary['top'].integral('(u_0 - 1)^2 d:x' @ ns, degree=degree*2)
lidcons = nutils.solver.optimize('lhs', sqr, droptol=1e-15)
cons = numpy.choose(numpy.isnan(lidcons), [lidcons, wallcons])
cons[-1] = 0 # pressure point constraint
res = domain.integral('(ubasis_ni,j stress_ij + pbasis_n u_k,k) d:x' @ ns, degree=degree*2)
with nutils.log.context('stokes'):
lhs0 = nutils.solver.solve_linear('lhs', res, constrain=cons)
postprocess(domain, ns, lhs=lhs0)
res += domain.integral('ubasis_ni u_i,j u_j d:x' @ ns, degree=degree*3)
with nutils.log.context('navierstokes'):
lhs1 = nutils.solver.newton('lhs', res, lhs0=lhs0, constrain=cons).solve(tol=1e-10)
postprocess(domain, ns, lhs=lhs1)
return lhs0, lhs1
# Postprocessing in this script is separated so that it can be reused for the
# results of Stokes and Navier-Stokes, and because of the extra steps required
# for establishing streamlines.
def postprocess(domain, ns, every=.05, spacing=.01, **arguments):
ns = ns.copy_() # copy namespace so that we don't modify the calling argument
ns.streambasis = domain.basis('std', degree=2)[1:] # remove first dof to obtain non-singular system
ns.stream = 'streambasis_n ?streamdofs_n' # stream function
sqr = domain.integral('((u_0 - stream_,1)^2 + (u_1 + stream_,0)^2) d:x' @ ns, degree=4)
arguments['streamdofs'] = nutils.solver.optimize('streamdofs', sqr, arguments=arguments) # compute streamlines
bezier = domain.sample('bezier', 9)
x, u, p, stream = bezier.eval(['x_i', 'sqrt(u_k u_k)', 'p', 'stream'] @ ns, **arguments)
with nutils.export.mplfigure('flow.png') as fig: # plot velocity as field, pressure as contours, streamlines as dashed
ax = fig.add_axes([.1,.1,.8,.8], yticks=[], aspect='equal')
import matplotlib.collections
ax.add_collection(matplotlib.collections.LineCollection(x[bezier.hull], colors='w', linewidths=.5, alpha=.2))
ax.tricontour(x[:,0], x[:,1], bezier.tri, stream, 16, colors='k', linestyles='dotted', linewidths=.5, zorder=9)
caxu = fig.add_axes([.1,.1,.03,.8], title='velocity')
imu = ax.tripcolor(x[:,0], x[:,1], bezier.tri, u, shading='gouraud', cmap='jet')
fig.colorbar(imu, cax=caxu)
caxu.yaxis.set_ticks_position('left')
caxp = fig.add_axes([.87,.1,.03,.8], title='pressure')
imp = ax.tricontour(x[:,0], x[:,1], bezier.tri, p, 16, cmap='gray', linestyles='solid')
fig.colorbar(imp, cax=caxp)
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. To
# keep with the default arguments simply run :sh:`python3 drivencavity.py`.
if __name__ == '__main__':
nutils.cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(nutils.testing.TestCase):
@nutils.testing.requires('matplotlib')
def test_square(self):
lhs0, lhs1 = main(nelems=3, etype='square', reynolds=100, degree=3)
with self.subTest('stokes'): self.assertAlmostEqual64(lhs0, '''
eNp1zj1IQlEUB/BrCJKEQxLRFNFQxvN1vTcpoqWhzZaGElr7WKOGirApiIaipcEKoiXCpaKEiCKnhjzn
XX1PejaEJGGFRCCiCH153YrXOXCG3+Fw/oT8rZFeQpaVqDGVmjHNxEKSJmxM2rOIal1aDlsxKyK+gF/a
sZbHEA5gDmL6FduuWRnHsAQXcABEXeGP/5rVrdUPqyxWma1q2ih3u1g7/+JnPf3+BiYtr5ToBGvm33yN
d/C3pLTrTi9d9Y2yCkuxU2Z6pa17CqpKMzTo+6AbdLJmc3eupC7axKFmF7NiR5c2aBpiUYugAxUcRk/N
mgyn2MVXsME83INblRZW6hMFfIA6CMRvbotonTgL7/ACWQjBfjwcT8MT6HAJSxCEI8hAvroxIQZ7cA7F
X+3ET3CgG1Ucxz5sRDu2IMctTONQNVkFbNW5iScGIT8HbdXq''')
with self.subTest('navier-stokes'): self.assertAlmostEqual64(lhs1, '''
eNptzktoU0EUBuC7KeLGguKioS4MBdPekNyZSWIwEihowVVBxJW0pYuiFgpiXSh0F0ltELvoC2zAVuor
RuiTJlRLC6Hof2cml0wwCxVqCl1XFOqi4p27LPlXP985HI5hHM/1i4aRMzvVL7VqOs4j5VMhS9un8k2Z
kEnZLL+271v3mLYb8oG4KuKiR0yGtkk6om1MODzLH/Ma/xZK0b+eXROveJzX7Vs8ZcXYUFTbkYiJp7yF
b9i3VTO765m/fFL+5IM8ZBfFHJvybCD4WvVWi86BZPIsj3j3Gv3cKKXKUDhJovQ7TbBhdsrSdjl4xcqS
btrEZukM7VDa3ge2wnHSRAt0lmboSFjbCfNMuGItkH7aSxdpi9Q2c+Gf80JFgpdIHxkgdaJtt3aufFq2
iRXxUPqchLfnV63yLT/Pd2CKLXqfadsL9DmGmLeruPPl42diN/44jyV8wBuMogvteIe827MYxwTWkMOi
K1k8QxrTbl9xZQpPMIzn2EDR3cgjg5dYxzYKKIHjDzbx252sY9mdHuKHaRj/AYh1yFc=''')
@nutils.testing.requires('matplotlib')
def test_mixed(self):
lhs0, lhs1 = main(nelems=3, etype='mixed', reynolds=100, degree=2)
with self.subTest('stokes'): self.assertAlmostEqual64(lhs0, '''
eNpjYICAiRePnWdg0D736SyIF3P2nK6VYSWQHWS+1SjI3MAkyLz6rMbZI2BZhXMJZxyMNp/xMbwMFA8y
LzNhYNh6YdUFiElzzykYgGg94yBzkH6oBQwvLm80YmA4r6dkCOYZq5h4GZUYgdg8QHKbJpA2OHhp8zmQ
iM8Vp6tpV03PMp1TPQ/ipwPJcIOtZyAmvT69Bcy6BOXHnM0+m3w28ezmM+ZnY88EnW0/O+vs2bO7zq48
W352FdA8ABC3SoM=''')
with self.subTest('navier-stokes'): self.assertAlmostEqual64(lhs1, '''
eNpjYICA1RezLjIwPD639hyIl31umX6vgQGQHWTuaRhkLmYcZB54bvvZq2dBsofPqZ4tMoo4o22oaxJk
HmReasLAsOrihAsQkxzOJl0B0TJAOZB+qAUMtZefGzIwxOjtNgDxfho9MbI1UjcCsV/pMTA802VgqDNY
qrsEbL+I7nGD0/o655ouMIFN3QLUqWSUcQZiEvMZbrA7npyG8IXPyJ2RPiN65ubpn6dPn+Y9I3XG4Awf
UMzlDPuZ60A9AH73RT0=''')
| mit |
JarnoRFB/GENNN | ga.py | 1 | 7202 | import random
import copy
from time import gmtime, strftime
import os
import matplotlib.pyplot as plt
class GA:
def __init__(self, parms):
#
self._parms = parms
self._population_size = parms['population']
self._rate_mutation = parms['mutation']['rate']
self._rate_crossover = parms['crossover']['rate']
self._candidate_class = parms['candidate_class']
self._start_time = strftime("%Y.%m.%d-%H.%M.%S", gmtime())
self._candidate_id = 0
# Create Random start population
self._population = list(
self._candidate_class(candidate_id=i, start_time_str=self._start_time, runtime_spec=parms['RUNTIME_SPEC'])
for i in range(self._population_size)
)
self._candidate_id = self._population_size
self.generation = 0
self.best_candidate = None
self.best_candidate_forever = None
self.fitness_avg = None
self.diversity = None
# set base_logdir
self._base_logdir = os.path.join(self._parms['RUNTIME_SPEC']['logdir'], str(self._start_time))
os.makedirs(self._base_logdir, exist_ok=True)
# Create running file
file_loc = os.path.join(self._base_logdir, "_running")
with open(file_loc, 'w') as fd:
fd.write("running")
# Save json
file_loc = os.path.join(self._base_logdir, "ga.json")
with open(file_loc, 'w') as fp:
fp.write(str(self._parms))
self._all_fitness_avg = list()
self._all_fitness_best = list()
self._all_diversity = list()
def mutate(self):
self.best_candidate = None
self.fitness_avg = None
self.diversity = None
for candidate in self._population:
candidate.mutation(self._rate_mutation)
def crossover(self, strategy):
if self._rate_crossover == 0:
return
self.best_candidate = None
self.fitness_avg = None
self.diversity = None
# Number of Crossover operations
crossovers = int((self._population_size * self._rate_crossover) / 2)
for i in range(crossovers):
candidate1 = random.randint(0, len(self._population) - 1)
candidate2 = random.randint(0, len(self._population) - 1)
# Get new Candidate 2 until Candidates are different
while candidate1 == candidate2:
candidate2 = random.randint(0, len(self._population) - 1)
self._population[candidate1].crossover(other_candidate=self._population[candidate2],
crossover_parms=strategy)
def evaluate(self, calc_diversity):
self.diversity = 0
self.fitness_avg = 0
self.generation += 1
# Here we can make Multi computing
# Set: best_candidate and fitness_avg
for candidate in self._population:
candidate.to_next_generation(self.generation)
self.fitness_avg += candidate.get_fitness()
if self.best_candidate is None or candidate.get_fitness() > self.best_candidate.get_fitness():
self.best_candidate = copy.deepcopy(candidate)
self.fitness_avg /= len(self._population)
if (self.best_candidate_forever is None or
self.best_candidate_forever.get_fitness() < self.best_candidate.get_fitness()):
# Copy best candidate.
self.best_candidate_forever = copy.deepcopy(self.best_candidate)
# Compute Diversity if wanted
if calc_diversity:
self._calc_diversity()
self._all_fitness_avg.append(copy.copy(self.fitness_avg))
self._all_fitness_best.append(copy.copy(self.best_candidate.get_fitness()))
self._all_diversity.append(copy.copy(self.diversity))
def write_stats(self):
file = os.path.join(self._base_logdir, 'graph.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self._all_fitness_avg, label='Fitness avg')
ax.plot(self._all_fitness_best, label='Fitness best')
ax.plot(self._all_diversity, label='diversity')
ax.set_xlabel('Generation')
ax.legend()
fig.savefig(file, format='png')
plt.clf()
plt.close(fig)
# Calc best Candidate more
print("BestID: " + str(self.best_candidate_forever._candidate_id) + "- Fitness: " +
str(round(self.best_candidate_forever.get_fitness(),3)))
file_loc = os.path.join(self._base_logdir, "besetID")
with open(file_loc, 'w') as fd:
fd.write(str(self.best_candidate_forever._candidate_id))
# Remove running file
file_loc = os.path.join(self._base_logdir, "_running")
os.remove(file_loc)
def _calc_diversity(self):
divs = 0
for idx_from, candidate_from in enumerate(self._population):
for candidate_to in self._population[idx_from + 1:]:
self.diversity += candidate_from.get_diversity(candidate_to)
divs += 1
self.diversity /= divs
def selection(self, strategy="Tournament", tournament_win_rate=0.75, tournament_size=10):
if strategy == "Tournament":
self._selection_tournament(tournament_win_rate, tournament_size)
def _selection_tournament(self, win_rate=0.75, tournement_size=10):
new_population = list()
sorted_candidates = sorted(self._population, key=lambda x: x.get_fitness())
for tournement in range(self._population_size):
# Create tournement candidates
idx_candidates = [i for i in range(self._population_size)]
random.shuffle(idx_candidates)
best_candidate_idx = max(idx_candidates[0:tournement_size])
worst_candidate_idx = min(idx_candidates[0:tournement_size])
if random.random() <= win_rate:
# new_population.append(copy.deepcopy(sorted_candidates[best_candidate_idx]))
network_spec_copy = copy.deepcopy(sorted_candidates[best_candidate_idx].network_spec)
new_population.append(self._candidate_class(candidate_id=self._candidate_id,
start_time_str=self._start_time,
network_spec=network_spec_copy,
runtime_spec=self._parms['RUNTIME_SPEC']))
self._candidate_id += 1
else:
# new_population.append(copy.deepcopy(sorted_candidates[worst_candidate_idx]))
network_spec_copy = copy.deepcopy(sorted_candidates[worst_candidate_idx].network_spec)
new_population.append(self._candidate_class(candidate_id=self._candidate_id,
start_time_str=self._start_time,
network_spec=network_spec_copy,
runtime_spec=self._parms['RUNTIME_SPEC']))
self._candidate_id += 1
self._population = new_population
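# Illustrative usage sketch (added, not part of the original module). It shows the
# minimal candidate interface and the `parms` layout that GA actually reads; the
# class name, fitness rule and log directory below are hypothetical placeholders.
class _SketchCandidate:
    def __init__(self, candidate_id, start_time_str, runtime_spec, network_spec=None):
        self._candidate_id = candidate_id
        self.network_spec = network_spec if network_spec is not None else {'gene': random.random()}
    def mutation(self, rate):  # called from GA.mutate()
        if random.random() < rate:
            self.network_spec['gene'] = random.random()
    def crossover(self, other_candidate, crossover_parms):  # called from GA.crossover()
        self.network_spec['gene'] = other_candidate.network_spec['gene']
    def to_next_generation(self, generation):  # evaluation hook used in GA.evaluate()
        pass
    def get_fitness(self):  # higher is better
        return self.network_spec['gene']
    def get_diversity(self, other):  # pairwise distance used by GA._calc_diversity()
        return abs(self.network_spec['gene'] - other.network_spec['gene'])
if __name__ == '__main__':
    _sketch_parms = {
        'population': 4,
        'mutation': {'rate': 0.1},
        'crossover': {'rate': 0.5},
        'candidate_class': _SketchCandidate,
        'RUNTIME_SPEC': {'logdir': '/tmp/gennn_sketch'},  # hypothetical log directory
    }
    _ga = GA(_sketch_parms)
    _ga.mutate()
    _ga.evaluate(calc_diversity=True)
    _ga.selection()
    print('best fitness so far:', _ga.best_candidate_forever.get_fitness())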
| mit |
KristianJensen/cameo | cameo/api/designer.py | 1 | 10721 | # Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
__all__ = ['design']
import re
import numpy as np
from IPython.core.display import display
from IPython.core.display import HTML
from pandas import DataFrame
from cameo import Metabolite, Model, phenotypic_phase_plane, fba
from cameo import config, util
from cameo.core.result import Result
from cameo.api.hosts import hosts, Host
from cameo.api.products import products
from cameo.exceptions import SolveError
from cameo.strain_design.heuristic import GeneKnockoutOptimization
from cameo.strain_design.heuristic.objective_functions import biomass_product_coupled_yield
from cameo.ui import notice, searching, stop_loader
from cameo.strain_design import pathway_prediction
from cameo.util import TimeMachine
from cameo.models import universal
from cameo.visualization import visualization
from cameo.visualization.plotting import Grid
import logging
logger = logging.getLogger(__name__)
# TODO: implement cplex preference (if available)
class _OptimizationRunner(object):
def __call__(self, strategy, *args, **kwargs):
(host, model, pathway) = (strategy[0], strategy[1], strategy[2])
with TimeMachine() as tm:
pathway.plug_model(model, tm)
objective = biomass_product_coupled_yield(model.biomass,
pathway.product,
model.carbon_source)
opt = GeneKnockoutOptimization(model=model, objective_function=objective, progress=True, plot=False)
return opt.run(product=pathway.product.id, max_evaluations=10000)
class DesignerResult(Result):
def __init__(self, designs, *args, **kwargs):
super(DesignerResult, self).__init__(*args, **kwargs)
self.designs = designs
def _repr_latex_(self):
pass
class StrainDesings(Result):
def __init__(self, organism, designs, *args, **kwargs):
super(StrainDesings, self).__init__(*args, **kwargs)
class Designer(object):
"""High-level strain design functionality.
Example
-------
design = Designer()
designs = design(product='L-glutamate')
"""
def __init__(self):
""""""
pass
def __call__(self, product='L-glutamate', hosts=hosts, database=None, view=config.default_view):
"""The works.
The following workflow will be followed to determine suitable
metabolic engineering strategies for a desired product:
- Determine production pathways for desired product and host organisms.
Try a list of default hosts if no hosts are specified.
- Determine maximum theoretical yields and production envelopes for
all routes.
- Determine if production routes can be coupled to growth.
- Determine over-expression, down-regulation, and KO targets.
Parameters
----------
product : str or Metabolite
The desired product.
hosts : list or Model or Host
A list of hosts (e.g. cameo.api.hosts), models, mixture thereof, or a single model or host.
Returns
-------
Designs
"""
if database is None:
database = universal.metanetx_universal_model_bigg_rhea
notice("Starting searching for compound %s" % product)
product = self.__translate_product_to_universal_reactions_model_metabolite(product, database)
pathways = self.predict_pathways(product, hosts=hosts, database=database)
optimization_reports = self.optimize_strains(pathways, view)
return optimization_reports
@staticmethod
def optimize_strains(pathways, view):
runner = _OptimizationRunner()
designs = [(host, model, pathway) for (host, model) in pathways for pathway in pathways[host, model]
if pathway.needs_optimization(model, objective=model.biomass)]
return view.map(runner, designs)
def predict_pathways(self, product, hosts=None, database=None): # TODO: make this work with a single host or model
"""Predict production routes for a desired product and host spectrum.
Parameters
----------
product : str or Metabolite
The desired product.
hosts : list or Model or Host
A list of hosts (e.g. cameo.api.hosts), models, mixture thereof, or a single model or host.
Returns
-------
dict
...
"""
pathways = dict()
product = self.__translate_product_to_universal_reactions_model_metabolite(product, database)
for host in hosts:
if isinstance(host, Model):
host = Host(name='UNKNOWN_HOST', models=[host])
for model in list(host.models):
notice('Predicting pathways for product %s in %s (using model %s).'
% (product.name, host, model.id))
identifier = searching()
try:
logger.debug('Trying to set solver to cplex for pathway predictions.')
model.solver = 'cplex' # CPLEX is better predicting pathways
except ValueError:
logger.debug('Could not set solver to cplex for pathway predictions.')
pass
pathway_predictor = pathway_prediction.PathwayPredictor(model,
universal_model=database,
compartment_regexp=re.compile(".*_c$"))
# TODO adjust these numbers to something reasonable
predicted_pathways = pathway_predictor.run(product, max_predictions=4, timeout=3 * 60, silent=True)
pathways[(host, model)] = predicted_pathways
stop_loader(identifier)
self.__display_pathways_information(predicted_pathways, host, model)
return pathways
def __translate_product_to_universal_reactions_model_metabolite(self, product, database):
if isinstance(product, Metabolite):
return product
elif isinstance(product, str):
search_result = products.search(product)
notice("Found %d compounds that match query '%s'" % (len(search_result), product))
self.__display_product_search_result(search_result)
notice("Choosing best match (%s) ... please interrupt if this is not the desired compound."
% search_result.name[0])
self.__display_compound(search_result.InChI[0])
return database.metabolites.get_by_id(search_result.index[0])
@staticmethod
def __display_product_search_result(search_result):
if util.in_ipnb():
Designer.__display_product_search_results_html(search_result)
else:
Designer.__display_product_search_results_cli(search_result)
@staticmethod
def __display_compound(inchi):
if util.in_ipnb():
Designer.__display_compound_html(inchi)
else:
Designer.__display_compound_cli(inchi)
@staticmethod
def __display_compound_html(inchi):
svg = Designer.__generate_svg(inchi)
display(HTML("""
<p>
%s
</p>
""" % svg))
@staticmethod
def __display_compound_cli(inchi):
text = Designer.__generate_ascii(inchi)
print(text)
@staticmethod
def __display_product_search_results_html(search_result):
rows = []
for index, row in search_result.iterrows():
name = row["name"]
formula = row["formula"]
rows.append("<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (index, name, formula))
display(HTML(
"""
<table>
<thead>
<th>Id</th>
<th>Name</th>
<th>Formula</th>
</thead>
<tbody>
%s
</tbody>
</table>
""" % "\n".join(rows)
))
@staticmethod
def __display_product_search_results_cli(search_result):
rows = np.ndarray((len(search_result), 3), dtype=object)
for i, index in enumerate(search_result.index):
row = search_result.loc[index]
name = row["name"]
formula = row["formula"]
rows[i, ] = [index, name, formula]
i += 1
display(DataFrame(rows, columns=["Id", "Name", "Formula"]))
@staticmethod
def __generate_svg(inchi):
if isinstance(inchi, float) or inchi is None:
return ""
else:
return visualization.inchi_to_svg(inchi, three_d=False)
@staticmethod
def __generate_ascii(inchi):
if isinstance(inchi, float) or inchi is None:
return ""
else:
return visualization.inchi_to_ascii(inchi)
@staticmethod
def __display_pathways_information(predicted_pathways, host, original_model):
# TODO: remove copy hack.
with Grid(nrows=2, title="Production envelopes for %s (%s)" % (host.name, original_model.id)) as grid:
for i, pathway in enumerate(predicted_pathways):
pathway_id = "Pathway %i" % (i + 1)
with TimeMachine() as tm:
pathway.plug_model(original_model, tm)
production_envelope = phenotypic_phase_plane(original_model,
variables=[original_model.biomass],
objective=pathway.product)
production_envelope.plot(grid, title=pathway_id, width=400, height=300)
@staticmethod
def calculate_yield(model, source, product):
try:
flux_dist = fba(model, objective=product)
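            # Added note: yield is product flux per unit of absolute substrate uptake,
            # e.g. a product flux of 2.0 against an uptake flux of -10.0 gives 0.2.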
return flux_dist[product.id] / abs(flux_dist[source.id])
except SolveError:
return 0.0
design = Designer()
| apache-2.0 |
Srisai85/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
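# Added note on the "ortho" normalization handled above: with norm="ortho" the
# forward transform is scaled by 1/sqrt(n) and the inverse by 1/sqrt(n) as well
# (instead of 1/n), which makes the DFT unitary; e.g.
# np.fft.fft([1, 0, 0, 0], norm="ortho") returns [0.5, 0.5, 0.5, 0.5].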
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
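# Helper used by the n-dimensional transforms below: fills in default values
# for `s` and `axes`, and for the inverse real transforms (`invreal`) rebuilds
# the full length of the last axis as ``2*(m-1)`` when no shape is given.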
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
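# Applies the given one-dimensional transform `function` along each requested
# axis in turn (last listed axis first); fftn/ifftn/fft2/ifft2 are thin
# wrappers around this loop.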
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the centre of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
    Compute the 2-dimensional discrete Fourier Transform.
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
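    Examples
    --------
    A minimal sketch; the exact array formatting may differ between NumPy
    versions:

    >>> a = np.ones((2, 2))
    >>> np.fft.rfft2(a)
    array([[ 4.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j]])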
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
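    Examples
    --------
    A minimal round-trip sketch; the exact array formatting may differ
    between NumPy versions:

    >>> a = np.ones((2, 2))
    >>> A = np.fft.rfft2(a)
    >>> np.fft.irfft2(A)
    array([[ 1.,  1.],
           [ 1.,  1.]])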
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
astrikos/nl_banks_stats | bank.py | 1 | 1112 | from pandas import ExcelFile
class Bank(object):
def __init__(self, transactions_file):
self.transactions_file = transactions_file
self.import_data(transactions_file)
def import_data(self, transactions_file):
pass
class ABN(Bank):
def import_data(self, transactions_file):
xls = ExcelFile(transactions_file)
self.data = xls.parse('Sheet0', index_col=3, na_values=['NA'])
def get_amount(self, transaction_type, pattern):
if transaction_type:
t_slice = self.data[[d > 0 for d in self.data['amount']]]
else:
t_slice = self.data[[d < 0 for d in self.data['amount']]]
final_slice = t_slice
if pattern:
final_slice = t_slice[
[pattern.lower() in d.lower() for d in t_slice['description']]
]
amount_series = final_slice['amount']
if transaction_type:
word = 'income'
else:
word = 'expenses'
print 'Total amount of %s is: %dE' % (
word,
sum([d for d in amount_series])
)
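# Illustrative usage (hypothetical file name; assumes an ABN export with
# 'amount' and 'description' columns as parsed in import_data above):
#
#   bank = ABN('transactions.xls')
#   bank.get_amount(True, 'salary')    # prints total income matching 'salary'
#   bank.get_amount(False, 'albert')   # prints total expenses matching 'albert'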
| lgpl-3.0 |
takaakiaoki/PyFoam | PyFoam/Applications/SamplePlot.py | 2 | 44177 | # ICE Revision: $Id: /local/openfoam/Python/PyFoam/PyFoam/Applications/SamplePlot.py 8488 2013-11-03T14:38:32.775063Z bgschaid $
"""
Application class that implements pyFoamSamplePlot.py
"""
import sys,string
from os import path
from optparse import OptionGroup
from .PyFoamApplication import PyFoamApplication
from PyFoam.RunDictionary.SampleDirectory import SampleDirectory
from PyFoam.Basics.SpreadsheetData import WrongDataSize
from PyFoam.Error import error,warning
from .PlotHelpers import cleanFilename
from PyFoam.ThirdParty.six import print_
class SamplePlot(PyFoamApplication):
def __init__(self,
args=None,
**kwargs):
description="""\
Reads data from the sample-dictionary and generates appropriate
gnuplot-commands. As an option the data can be written to a CSV-file.
"""
PyFoamApplication.__init__(self,
args=args,
description=description,
usage="%prog [options] <casedir>",
nr=1,
changeVersion=False,
interspersed=True,
**kwargs)
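    # Illustrative invocation (a sketch only; line and field names are
    # case-specific, the options are defined in addOptions below):
    #   pyFoamSamplePlot.py --mode=timesInOne --line=centerline --field=U <casedir>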
modeChoices=["separate","timesInOne","fieldsInOne","linesInOne","complete"]
def addOptions(self):
data=OptionGroup(self.parser,
"Data",
"Select the data to plot")
self.parser.add_option_group(data)
data.add_option("--line",
action="append",
default=None,
dest="line",
                        help="The sample line from which data is plotted (can be used more than once)")
data.add_option("--field",
action="append",
default=None,
dest="field",
help="The fields that are plotted (can be used more than once). If none are specified all found fields are used")
data.add_option("--pattern-for-line",
action="store",
default=None,
dest="linePattern",
help="Usually the name of the line is automatically determined from the file name by taking the first part. If this regular expression is specified then it is used: the first group in the pattern will be the line name")
data.add_option("--default-value-names",
action="store",
default=None,
dest="valueNames",
help="Usually the names of the values automatically determined from the file. If they are specified (as a comma separated list of names) then these names are used and all the files MUST have these values")
data.add_option("--no-extension-needed",
action="store_false",
default=True,
dest="needsExtension",
help="The files do not have an extension")
data.add_option("--is-distribution",
action="store_true",
default=False,
dest="isDistribution",
help="The files in the directory are distributions. This sets the names of the lines and fields accordingly")
data.add_option("--postfix-for-field-names",
action="append",
default=[],
dest="fieldPostfix",
help="Possible postfix for field names of the form 'name_postfix'. Note that this should not be a possible field name")
data.add_option("--prefix-for-field-names",
action="append",
default=[],
dest="fieldPrefix",
help="Possible prefix for field names of the form 'prefix_name'. Note that this should not be a possible field name")
data.add_option("--directory-name",
action="store",
default="samples",
dest="dirName",
help="Alternate name for the directory with the samples (Default: %default)")
data.add_option("--preferred-component",
action="store",
type="int",
default=None,
dest="component",
help="The component that should be used for vectors. Otherwise the absolute value is used")
data.add_option("--reference-directory",
action="store",
default=None,
dest="reference",
help="A reference directory. If fitting sample data is found there it is plotted alongside the regular data")
data.add_option("--reference-case",
action="store",
default=None,
dest="referenceCase",
                        help="A reference case where a directory with the same name is looked for. Mutually exclusive with --reference-directory")
scale=OptionGroup(self.parser,
"Scale",
"Scale the data before comparing (not used during plotting)")
self.parser.add_option_group(scale)
scale.add_option("--scale-data",
action="store",
type="float",
default=1,
dest="scaleData",
help="Scale the data by this factor. Default: %default")
scale.add_option("--offset-data",
action="store",
type="float",
default=0,
dest="offsetData",
help="Offset the data by this factor. Default: %default")
scale.add_option("--scale-x-axis",
action="store",
type="float",
default=1,
dest="scaleXAxis",
help="Scale the x-axis by this factor. Default: %default")
scale.add_option("--offset-x-axis",
action="store",
type="float",
default=0,
dest="offsetXAxis",
help="Offset the x-axis by this factor. Default: %default")
scale.add_option("--scale-reference-data",
action="store",
type="float",
default=1,
dest="scaleReferenceData",
help="Scale the reference data by this factor. Default: %default")
scale.add_option("--offset-reference-data",
action="store",
type="float",
default=0,
dest="offsetReferenceData",
help="Offset the reference data by this factor. Default: %default")
scale.add_option("--scale-reference-x-axis",
action="store",
type="float",
default=1,
dest="scaleReferenceXAxis",
help="Scale the reference x-axis by this factor. Default: %default")
scale.add_option("--offset-reference-x-axis",
action="store",
type="float",
default=0,
dest="offsetReferenceXAxis",
help="Offset the reference x-axis by this factor. Default: %default")
time=OptionGroup(self.parser,
"Time",
"Select the times to plot")
self.parser.add_option_group(time)
time.add_option("--time",
action="append",
default=None,
dest="time",
help="The times that are plotted (can be used more than once). If none are specified all found times are used")
time.add_option("--min-time",
action="store",
type="float",
default=None,
dest="minTime",
help="The smallest time that should be used")
time.add_option("--max-time",
action="store",
type="float",
default=None,
dest="maxTime",
help="The biggest time that should be used")
time.add_option("--fuzzy-time",
action="store_true",
default=False,
dest="fuzzyTime",
help="Try to find the next timestep if the time doesn't match exactly")
time.add_option("--latest-time",
action="store_true",
default=False,
dest="latestTime",
help="Take the latest time from the data")
time.add_option("--reference-time",
action="store",
default=None,
dest="referenceTime",
help="Take this time from the reference data (instead of using the same time as the regular data)")
time.add_option("--tolerant-reference-time",
action="store_true",
default=False,
dest="tolerantReferenceTime",
help="Take the reference-time that is nearest to the selected time")
output=OptionGroup(self.parser,
"Appearance",
"How it should be plotted")
self.parser.add_option_group(output)
output.add_option("--mode",
type="choice",
default="separate",
dest="mode",
action="store",
choices=self.modeChoices,
help="What kind of plots are generated: a) separate for every time, line and field b) all times of a field in one plot c) all fields of a time in one plot d) all lines in one plot e) everything in one plot (Names: "+", ".join(self.modeChoices)+") Default: %default")
output.add_option("--unscaled",
action="store_false",
dest="scaled",
default=True,
help="Don't scale a value to the same range for all plots")
output.add_option("--scale-all",
action="store_true",
dest="scaleAll",
default=False,
help="Use the same scale for all fields (else use one scale for each field)")
output.add_option("--scale-domain",
action="store_true",
dest="scaleDomain",
default=False,
help="Automatically scale the x-domain to the same length for all plots")
output.add_option("--domain-minimum",
action="store",
type="float",
dest="domainMin",
default=None,
help="Use this value as the minimum for the x-domain for all plots")
output.add_option("--domain-maximum",
action="store",
type="float",
dest="domainMax",
default=None,
help="Use this value as the maximum for the x-domain for all plots")
output.add_option("--gnuplot-file",
action="store",
dest="gnuplotFile",
default=None,
help="Write the necessary gnuplot commands to this file. Else they are written to the standard output")
output.add_option("--picture-destination",
action="store",
dest="pictureDest",
default=None,
help="Directory the pictures should be stored to")
output.add_option("--name-prefix",
action="store",
dest="namePrefix",
default=None,
help="Prefix to the picture-name")
output.add_option("--csv-file",
action="store",
dest="csvFile",
default=None,
help="Write the data to a CSV-file instead of the gnuplot-commands")
output.add_option("--excel-file",
action="store",
dest="excelFile",
default=None,
help="Write the data to a Excel-file instead of the gnuplot-commands")
output.add_option("--pandas-data",
action="store_true",
dest="pandasData",
default=False,
help="Pass the raw data in pandas-format")
output.add_option("--numpy-data",
action="store_true",
dest="numpyData",
default=False,
help="Pass the raw data in numpy-format")
data.add_option("--info",
action="store_true",
dest="info",
default=False,
help="Print info about the sampled data and exit")
output.add_option("--style",
action="store",
default="lines",
dest="style",
help="Gnuplot-style for the data (Default: %default)")
output.add_option("--clean-filename",
action="store_true",
dest="cleanFilename",
default=False,
help="Clean filenames so that they can be used in HTML or Latex-documents")
output.add_option("--index-instead-of-time",
action="store_true",
dest="indexInsteadOfTime",
default=False,
help="Use an index instead of the time in the filename (mainly needed if the files are used to make a movie with FFMPEG)")
output.add_option("--reference-prefix",
action="store",
dest="refprefix",
default="Reference",
help="Prefix that gets added to the reference lines. Default: %default")
output.add_option("--resample-reference",
action="store_true",
dest="resampleReference",
default=False,
help="Resample the reference value to the current x-axis (for CSV or Excel-output)")
output.add_option("--extend-data",
action="store_true",
dest="extendData",
default=False,
help="Extend the data range if it differs (for CSV or Excel-files)")
output.add_option("--silent",
action="store_true",
dest="silent",
default=False,
help="Don't write to screen (with the silent and the compare-options)")
numerics=OptionGroup(self.parser,
"Quantify",
"Metrics of the data and numerical comparisons")
self.parser.add_option_group(numerics)
numerics.add_option("--metrics",
action="store_true",
dest="metrics",
default=None,
help="Print the metrics of the data sets")
numerics.add_option("--compare",
action="store_true",
dest="compare",
default=None,
help="Compare all data sets that are also in the reference data")
numerics.add_option("--common-range-compare",
action="store_true",
dest="commonRange",
default=None,
help="When comparing two datasets only use the common time range")
numerics.add_option("--index-tolerant-compare",
action="store_true",
dest="indexTolerant",
default=None,
                            help="Compare two data sets even if they have different indices")
numerics.add_option("--use-reference-for-comparison",
action="store_false",
dest="compareOnOriginal",
default=True,
help="Use the reference-data as the basis for the numerical comparison. Otherwise the original data will be used")
def run(self):
if self.opts.isDistribution:
if self.opts.valueNames or self.opts.linePattern:
                self.error("The option --is-distribution cannot be used with --pattern-for-line or --default-value-names")
# self.opts.valueNames="normalized,raw"
self.opts.linePattern=".+istribution_(.+)"
self.opts.needsExtension=False
        # remove trailing slash if present
if self.opts.dirName[-1]==path.sep:
self.opts.dirName=self.opts.dirName[:-1]
usedDirName=self.opts.dirName.replace("/","_")
if self.opts.valueNames==None:
usedValueNames=None
else:
            usedValueNames=self.opts.valueNames.split(",")
samples=SampleDirectory(self.parser.getArgs()[0],
dirName=self.opts.dirName,
postfixes=self.opts.fieldPostfix,
prefixes=self.opts.fieldPrefix,
valueNames=usedValueNames,
namesFromFirstLine=self.opts.isDistribution,
linePattern=self.opts.linePattern,
needsExtension=self.opts.needsExtension)
reference=None
if self.opts.reference and self.opts.referenceCase:
            self.error("Options --reference-directory and --reference-case are mutually exclusive")
if (self.opts.csvFile or self.opts.excelFile or self.opts.pandasData or self.opts.numpyData) and (self.opts.compare or self.opts.metrics):
            self.error("Options --csv-file/--excel-file/--pandas-data/--numpy-data and --compare/--metrics are mutually exclusive")
if self.opts.reference:
reference=SampleDirectory(self.parser.getArgs()[0],
dirName=self.opts.reference,
postfixes=self.opts.fieldPostfix,
prefixes=self.opts.fieldPrefix)
elif self.opts.referenceCase:
reference=SampleDirectory(self.opts.referenceCase,
dirName=self.opts.dirName,
postfixes=self.opts.fieldPostfix,
prefixes=self.opts.fieldPrefix)
if reference:
if path.samefile(reference.dir,samples.dir):
self.error("Used sample directory",samples.dir,
"and reference directory",reference.dir,
"are the same")
lines=samples.lines()
times=samples.times
if self.opts.info:
if not self.opts.silent:
print_("Times : ",samples.times)
print_("Lines : ",samples.lines())
print_("Fields: ",list(samples.values()))
self.setData({'times' : samples.times,
'lines' : samples.lines(),
'values' : list(samples.values())})
if reference:
if not self.opts.silent:
print_("\nReference Data:")
print_("Times : ",reference.times)
print_("Lines : ",reference.lines())
print_("Fields: ",list(reference.values()))
self.setData({'reference':{'times' : samples.times,
'lines' : samples.lines(),
'values' : list(samples.values())}})
return 0
if self.opts.line==None:
# error("At least one line has to be specified. Found were",samples.lines())
self.opts.line=lines
else:
for l in self.opts.line:
if l not in lines:
error("The line",l,"does not exist in",lines)
if self.opts.latestTime:
if self.opts.time:
self.opts.time.append(samples.times[-1])
else:
self.opts.time=[samples.times[-1]]
if self.opts.maxTime or self.opts.minTime:
if self.opts.time:
error("Times",self.opts.time,"and range [",self.opts.minTime,",",self.opts.maxTime,"] set: contradiction")
self.opts.time=[]
if self.opts.maxTime==None:
self.opts.maxTime= 1e20
if self.opts.minTime==None:
self.opts.minTime=-1e20
for t in times:
if float(t)<=self.opts.maxTime and float(t)>=self.opts.minTime:
self.opts.time.append(t)
if len(self.opts.time)==0:
error("No times in range [",self.opts.minTime,",",self.opts.maxTime,"] found: ",times)
elif self.opts.time:
iTimes=self.opts.time
self.opts.time=[]
for t in iTimes:
if t in samples.times:
self.opts.time.append(t)
elif self.opts.fuzzyTime:
tf=float(t)
use=None
dist=1e20
for ts in samples.times:
if abs(tf-float(ts))<dist:
use=ts
dist=abs(tf-float(ts))
if use and use not in self.opts.time:
self.opts.time.append(use)
else:
pass
# self.warning("Time",t,"not found in the sample-times. Use option --fuzzy")
if self.opts.tolerantReferenceTime:
if self.opts.referenceTime:
self.error("--tolerant-reference-time and --reference-time can't be used at the same time")
refTimes={}
for t in self.opts.time:
dist=1e20
for rt in reference.times:
if abs(float(t)-float(rt))<dist:
refTimes[t]=rt
dist=abs(float(t)-float(rt))
plots=[]
oPlots=[]
rPlots=[]
if self.opts.mode=="separate":
if self.opts.time==None:
self.opts.time=samples.times
if self.opts.field==None:
self.opts.field=list(samples.values())
if self.opts.line==None:
self.opts.line=samples.lines()
for t in self.opts.time:
for f in self.opts.field:
for l in self.opts.line:
plot=samples.getData(line=[l],
value=[f],
time=[t],
scale=(self.opts.scaleXAxis,
self.opts.scaleData),
offset=(self.opts.offsetXAxis,
self.opts.offsetData))
oPlots.append(plot[:])
if reference:
rT=[t]
if self.opts.referenceTime:
rT=[self.opts.referenceTime]
elif self.opts.tolerantReferenceTime:
rT=[refTimes[t]]
p=reference.getData(line=[l],
value=[f],
time=rT,
note=self.opts.refprefix+" ",
scale=(self.opts.scaleReferenceXAxis,
self.opts.scaleReferenceData),
offset=(self.opts.offsetReferenceXAxis,
self.opts.offsetReferenceData))
rPlots.append(p)
plot+=p
plots.append(plot)
elif self.opts.mode=="timesInOne":
if self.opts.field==None:
self.opts.field=list(samples.values())
if self.opts.line==None:
self.opts.line=samples.lines()
for f in self.opts.field:
for l in self.opts.line:
plot=samples.getData(line=[l],
value=[f],
time=self.opts.time)
oPlots.append(plot[:])
if reference:
rT=self.opts.time
if self.opts.referenceTime:
rT=[self.opts.referenceTime]
elif self.opts.tolerantReferenceTime:
rT=[refTimes[t]]
p=reference.getData(line=[l],
value=[f],
time=rT,
note=self.opts.refprefix+" ")
rPlots.append(p)
plot+=p
plots.append(plot)
elif self.opts.mode=="fieldsInOne":
if self.opts.scaled and not self.opts.scaleAll:
warning("In mode '",self.opts.mode,"' all fields are scaled to the same value")
self.opts.scaleAll=True
if self.opts.time==None:
self.opts.time=samples.times
if self.opts.line==None:
self.opts.line=samples.lines()
for t in self.opts.time:
for l in self.opts.line:
plot=samples.getData(line=[l],
value=self.opts.field,
time=[t])
oPlots.append(plot[:])
if reference:
rT=t
if self.opts.referenceTime:
rT=self.opts.referenceTime
elif self.opts.tolerantReferenceTime:
rT=refTimes[t]
p=reference.getData(line=[l],
value=self.opts.field,
time=[rT],
note=self.opts.refprefix+" ")
rPlots.append(p)
plot+=p
plots.append(plot)
elif self.opts.mode=="linesInOne":
if self.opts.field==None:
self.opts.field=list(samples.values())
if self.opts.time==None:
self.opts.time=samples.times
for f in self.opts.field:
for t in self.opts.time:
plot=samples.getData(line=self.opts.line,
value=[f],
time=[t])
oPlots.append(plot[:])
if reference:
rT=t
if self.opts.referenceTime:
rT=self.opts.referenceTime
elif self.opts.tolerantReferenceTime:
rT=refTimes[t]
p=reference.getData(line=self.opts.line,
value=[f],
time=[rT],
note=self.opts.refprefix+" ")
rPlots.append(p)
plot+=p
plots.append(plot)
elif self.opts.mode=="complete":
if self.opts.scaled and not self.opts.scaleAll:
warning("In mode '",self.opts.mode,"' all fields are scaled to the same value")
self.opts.scaleAll=True
plot=samples.getData(line=self.opts.line,
value=self.opts.field,
time=self.opts.time)
oPlots.append(plot[:])
if reference:
rT=self.opts.time
if self.opts.referenceTime:
rT=[self.opts.referenceTime]
elif self.opts.tolerantReferenceTime:
rT=[refTimes[t]]
p=reference.getData(line=self.opts.line,
value=self.opts.field,
time=rT,
note=self.opts.refprefix+" ")
plot+=p
rPlots.append(p)
plots.append(plot)
xMin,xMax=None,None
if self.opts.scaleDomain:
if self.opts.domainMin or self.opts.domainMax:
self.error("--scale-domain used. Can't use --domain-minimum or --domain-maximum")
xMin,xMax=1e40,-1e40
for p in plots:
for d in p:
mi,mx=d.domain()
xMin=min(xMin,mi)
xMax=max(xMax,mx)
else:
xMin,xMax=self.opts.domainMin,self.opts.domainMax
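        # Determine y-ranges for scaling: a single global range if --scale-all,
        # otherwise one range per field name (keyed by d.name).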
if self.opts.scaled:
if self.opts.scaleAll:
vRange=None
else:
vRanges={}
for p in plots:
for d in p:
mi,ma=d.range(component=self.opts.component)
nm=d.name
if not self.opts.scaleAll:
if nm in vRanges:
vRange=vRanges[nm]
else:
vRange=None
if vRange==None:
vRange=mi,ma
else:
vRange=min(vRange[0],mi),max(vRange[1],ma)
if not self.opts.scaleAll:
vRanges[nm]=vRange
result="set term png\n"
plots=[p for p in plots if len(p)>0]
if len(plots)<1:
self.error("No plots produced. Nothing done")
for p in plots:
if len(p)<1:
continue
name=""
if self.opts.namePrefix:
name+=self.opts.namePrefix+"_"
name+=usedDirName
title=None
tIndex=times.index(p[0].time())
# name+="_"+"_".join(self.opts.line)
if self.opts.mode=="separate":
name+="_%s" % (p[0].line())
if self.opts.indexInsteadOfTime:
name+="_%s_%04d" % (p[0].name,tIndex)
else:
name+="_%s_t=%f" % (p[0].name,float(p[0].time()))
title="%s at t=%f on %s" % (p[0].name,float(p[0].time()),p[0].line())
elif self.opts.mode=="timesInOne":
name+="_%s" % (p[0].line())
if self.opts.time!=None:
name+="_"+"_".join(["t="+t for t in self.opts.time])
name+="_%s" % p[0].name
title="%s on %s" % (p[0].name,p[0].line())
elif self.opts.mode=="fieldsInOne":
name+="_%s" % (p[0].line())
if self.opts.field!=None:
name+="_"+"_".join(self.opts.field)
if self.opts.time!=None:
name+="_"+"_".join(["t="+t for t in self.opts.time])
name+="_%04d" % tIndex
title="t=%f on %s" % (float(p[0].time()),p[0].line())
elif self.opts.mode=="linesInOne":
name+="_%s" % (p[0].name)
if self.opts.line!=None:
name+="_"+"_".join(self.opts.line)
if self.opts.indexInsteadOfTime:
name+="_%04d" % tIndex
else:
name+="_t=%f" % float(p[0].time())
title="%s at t=%f" % (p[0].name,float(p[0].time()))
elif self.opts.mode=="complete":
pass
name+=".png"
if self.opts.pictureDest:
name=path.join(self.opts.pictureDest,name)
if self.opts.cleanFilename:
name=cleanFilename(name)
result+='set output "%s"\n' % name
if title!=None:
result+='set title "%s"\n' % title.replace("_","\\_")
result+="plot "
if self.opts.scaled:
if not self.opts.scaleAll:
vRange=vRanges[p[0].name]
# only scale if extremas are sufficiently different
if abs(vRange[0]-vRange[1])>1e-5*max(abs(vRange[0]),abs(vRange[1])) and max(abs(vRange[0]),abs(vRange[1]))>1e-10:
yRange="[%g:%g] " % vRange
else:
yRange="[]"
else:
yRange="[]"
if xMin or xMax:
xRange="["
if xMin:
xRange+=str(xMin)
xRange+=":"
if xMax:
xRange+=str(xMax)
xRange+="]"
else:
xRange="[]"
if self.opts.scaled or xMin or xMax:
result+=xRange+yRange
first=True
for d in p:
if first:
first=False
else:
result+=", "
colSpec=d.index+1
if d.isVector():
if self.opts.component!=None:
colSpec=d.index+1+self.opts.component
else:
colSpec="(sqrt($%d**2+$%d**2+$%d**2))" % (d.index+1,d.index+2,d.index+3)
# result+='"%s" using 1:%s ' % (d.file,colSpec)
def makeCol(spec,sc,off):
if type(spec)==str:
pre=""
else:
pre="$"
spec=str(spec)
if sc==1:
if off==0:
return spec
else:
return "(%s%s+%f)" % (pre,spec,off)
else:
if off==0:
return "(%s%s*%f)" % (pre,spec,sc)
else:
return "(%s%s*%f+%f)" % (pre,spec,sc,off)
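                # e.g. makeCol(1, 1, 0) -> "1" and makeCol(3, 2.0, 0.5) -> "($3*2.000000+0.500000)";
                # string column expressions (vector magnitudes) pass through
                # unchanged when scale is 1 and offset is 0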
result+='"%s" using %s:%s ' % (d.file,
makeCol(1,d.scale[0],d.offset[0]),
makeCol(colSpec,d.scale[1],d.offset[1]))
title=d.note
if self.opts.mode=="separate":
title+=""
elif self.opts.mode=="timesInOne":
title+="t=%f" % float(d.time())
elif self.opts.mode=="fieldsInOne":
title+="%s" % d.name
elif self.opts.mode=="linesInOne":
title+="t=%f" % float(d.time())
elif self.opts.mode=="complete":
title+="%s at t=%f" % (d.name,float(d.time()))
if len(self.opts.line)>1:
title+=" on %s" % d.line()
if title=="":
result+="notitle "
else:
result+='title "%s" ' % title.replace("_","\\_")
result+="with %s " % self.opts.style
result+="\n"
if self.opts.csvFile or self.opts.excelFile or self.opts.pandasData or self.opts.numpyData:
tmp=sum(plots,[])
c=tmp[0]()
for p in tmp[1:]:
try:
c+=p()
except WrongDataSize:
if self.opts.resampleReference:
sp=p()
for n in sp.names()[1:]:
data=c.resample(sp,
n,
extendData=self.opts.extendData)
try:
c.append(n,data)
except ValueError:
c.append(self.opts.refprefix+" "+n,data)
else:
self.warning("Try the --resample-option")
raise
if self.opts.csvFile:
c.writeCSV(self.opts.csvFile)
if self.opts.excelFile:
c.getData().to_excel(self.opts.excelFile)
if self.opts.pandasData:
self.setData({"series":c.getSeries(),
"dataFrame":c.getData()})
if self.opts.numpyData:
self.setData({"data":c.data.copy()})
elif self.opts.compare or self.opts.metrics:
statData={}
if self.opts.compare:
statData["compare"]={}
if self.opts.metrics:
statData["metrics"]={}
for p in self.opts.line:
if self.opts.compare:
statData["compare"][p]={}
if self.opts.metrics:
statData["metrics"][p]={}
oPlots=[item for sublist in oPlots for item in sublist]
rPlots=[item for sublist in rPlots for item in sublist]
if len(rPlots)!=len(oPlots) and self.opts.compare:
self.error("Number of original data sets",len(oPlots),
"is not equal to the reference data sets",
len(rPlots))
if len(rPlots)==0 and self.opts.metrics:
rPlots=[None]*len(oPlots)
for o,r in zip(oPlots,rPlots):
data=o(scaleData=self.opts.scaleData,
offsetData=self.opts.offsetData,
scaleX=self.opts.scaleXAxis,
offsetX=self.opts.offsetXAxis)
if self.opts.compare:
if o.name!=r.name or (o.index!=r.index and not self.opts.indexTolerant):
self.error("Data from original",o.name,o.index,
"and reference",r.name,r.index,
"do not match. Try --index-tolerant-compare if you're sure that the data is right")
ref=r(scaleData=self.opts.scaleReferenceData,
offsetData=self.opts.offsetReferenceData,
scaleX=self.opts.scaleReferenceXAxis,
offsetX=self.opts.offsetReferenceXAxis)
else:
ref=None
for i,n in enumerate(data.names()):
if i==0:
continue
indexName=o.name
if n.split(" ")[-1]!=indexName:
indexName=n.split(" ")[-1]
if self.opts.metrics:
if not self.opts.silent:
print_("Metrics for",indexName,"(Path:",o.file,")")
result=data.metrics(data.names()[i],
minTime=self.opts.minTime,
maxTime=self.opts.maxTime)
statData["metrics"][o.line()][indexName]=result
if not self.opts.silent:
print_(" Min :",result["min"])
print_(" Max :",result["max"])
print_(" Average :",result["average"])
print_(" Weighted average :",result["wAverage"])
if not self.opts.compare:
print_("Data size:",data.size())
print_(" Time Range :",result["tMin"],result["tMax"])
if self.opts.compare:
oname=data.names()[i]
if self.opts.referenceTime or self.opts.tolerantReferenceTime:
oname=ref.names()[i]
if not self.opts.silent:
print_("Comparing",indexName,"with name",oname,"(Path:",r.file,")",end="")
if self.opts.compareOnOriginal:
if not self.opts.silent:
print_("on original data points")
result=data.compare(ref,
data.names()[i],
otherName=oname,common=self.opts.commonRange,
minTime=self.opts.minTime,
maxTime=self.opts.maxTime)
else:
if not self.opts.silent:
print_("on reference data points")
result=ref.compare(data,
oname,
otherName=data.names()[i],
common=self.opts.commonRange,
minTime=self.opts.minTime,
maxTime=self.opts.maxTime)
statData["compare"][o.line()][indexName]=result
if not self.opts.silent:
print_(" Max difference :",result["max"],"(at",result["maxPos"],")")
print_(" Average difference :",result["average"])
print_(" Weighted average :",result["wAverage"])
print_("Data size:",data.size(),"Reference:",ref.size())
if not self.opts.metrics:
print_(" Time Range :",result["tMin"],result["tMax"])
if not self.opts.silent:
print_()
self.setData(statData)
else:
dest=sys.stdout
if self.opts.gnuplotFile:
dest=open(self.opts.gnuplotFile,"w")
dest.write(result)
# Should work with Python3 and Python2
| gpl-2.0 |
yeatmanlab/BrainTools | projects/NLR_MEG/nlr_stats.py | 1 | 7281 | # -*- coding: utf-8 -*-
# Author: Kambiz Tavabi <[email protected]>
#
"""Docsting
"""
import matplotlib
matplotlib.use('Agg')
import os
from os import path as op
import numpy as np
from functools import partial
import time
import mne
from mne import set_log_level as log
from mne.stats import ttest_1samp_no_p
from mne.minimum_norm import (read_inverse_operator, apply_inverse)
import mnefun
from mnefun import anova_time
from mnefun import get_fsaverage_medial_vertices
__copyright__ = "Copyright 2015, ILABS"
__status__ = "Development"
log(verbose='Warning')
# cd to meg directory
os.chdir('/media/ALAYA/data/ilabs/nlr/')
work_dir = os.getcwd()
# set up mnefun parameters of interest
p = mnefun.Params(lp_cut=40.)
p.analyses = ['Words_noise']
p.subjects = ['nlr01', 'nlr02', 'nlr04', 'nlr05', 'nlr06', 'nlr07', 'nlr08']
p.structurals = ['nlr01', 'nlr02', 'nlr04', 'nlr05', 'nlr06', 'nlr07', 'nlr08']
do_plots = False
reload_data = True
do_contrasts = True
do_anova = False
# Local variables
lambda2 = 1. / 9.
n_smooth = 15
fs_verts = [np.arange(10242), np.arange(10242)]
fs_medial = get_fsaverage_medial_vertices()
inv_type = 'meg-fixed' # can be meg-eeg, meg-fixed, meg, eeg-fixed, or eeg
fname_data = op.join(work_dir, '%s_data.npz' % p.analyses[0])
sigma = 1e-3
n_jobs = 18
tmin, tmax = 0.15, 0.20 # time window for RM-ANOVA
conditions = ['epochs_word_c254_p20_010', 'epochs_word_c254_p50_010', 'epochs_word_c137_p20_010', 'epochs_word_c141_p20_010',
'epochs_noise_010',
'epochs_word_c254_p20_020', 'epochs_word_c254_p50_020', 'epochs_word_c137_p20_020', 'epochs_word_c141_p20_020',
'epochs_noise_020'] # events of interest
contrasts = [[4, 0],
[9, 5],
[2, 0],
[7, 5],
[0, 5],
[1, 6],
[2, 7],
[3, 8]] # contrasts of interest
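# Each pair indexes into `conditions`: e.g. [4, 0] compares 'epochs_noise_010'
# against 'epochs_word_c254_p20_010', and [9, 5] the corresponding _020 pair.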
# Plot (butterfly & topographic) conditions averaged over subjects in sensor domain.
if do_plots:
if not op.exists(work_dir + '/figures'):
os.mkdir(work_dir + '/figures')
for c in conditions:
evo = []
for subj in p.subjects:
ev_name = 'evoked_%s' % c
evoked_file = op.join(work_dir, subj, 'inverse',
'%s_%d-sss_eq_%s-ave.fif' % (p.analyses[0], p.lp_cut, subj))
evo.append(mne.read_evokeds(evoked_file, condition=c, baseline=(None, 0),
kind='average', proj=True))
evo_grand_ave = np.sum(evo)
h0 = evo_grand_ave.plot_topomap(times=np.arange(0, evo_grand_ave.times[-1], 0.1))
h0.savefig(op.join(work_dir, 'figures', ev_name + '_topomap'), dpi=96, format='png')
h1 = evo_grand_ave.plot()
h1.savefig(op.join(work_dir, 'figures', ev_name + '_butterfly'), dpi=96, format='png')
######################################
# Do source imaging and handle data.#
######################################
if reload_data:
naves = np.zeros(len(p.subjects), int)
for si, (subj, struc) in enumerate(zip(p.subjects, p.structurals)):
print('Loading data for subject %s...' % subj)
inv_dir = op.join(work_dir, subj, 'inverse')
# load the inverse
inv = op.join(inv_dir, '%s-%d-sss-%s-inv.fif' % (subj, p.lp_cut, inv_type))
inv = read_inverse_operator(inv)
fname = op.join(inv_dir, '%s_%d-sss_eq_%s-ave.fif'
% (p.analyses[0], p.lp_cut, subj))
aves = [mne.Evoked(fname, cond, baseline=(None, 0), proj=True,
kind='average') for cond in conditions]
nave = np.unique([a.nave for a in aves])
assert len(nave) == 1
for ave, cond in zip(aves, conditions):
assert ave.comment == cond
naves[si] = nave[0]
# apply inverse, bin, morph
stcs = [apply_inverse(ave, inv, lambda2, 'dSPM') for ave in aves]
stcs = [stc.bin(0.005) for stc in stcs]
m = mne.compute_morph_matrix(struc, 'fsaverage', stcs[0].vertices,
fs_verts, n_smooth)
stcs = [stc.morph_precomputed('fsaverage', fs_verts, m)
for stc in stcs]
# put in big matrix
if subj == p.subjects[0]:
data = np.empty((len(stcs), len(p.subjects), stcs[0].shape[0],
stcs[0].shape[1]))
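            # data shape: (n_conditions, n_subjects, n_vertices, n_times)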
for di, stc in enumerate(stcs):
data[di, si, :, :] = stc.data
times = stc.times
print('Writing data...')
np.savez_compressed(fname_data, data=data, times=times, naves=naves)
else:
print('Loading saved data...')
data = np.load(fname_data)
data, times, naves = data['data'], data['times'], data['naves']
# 1-sample t-test in time (uncorrected) on source data for given contrasts and save results as source time course.
for s in range(len(p.subjects)):
for cont in contrasts:
contrast = '-'.join([conditions[c] for c in cont[::-1]])
X = data[:, s, :, :]
X = (np.abs(X[cont[1]]) - np.abs(X[cont[0]]))
stc = mne.SourceEstimate(X, fs_verts, times[0],
np.diff(times[:2]))
stc.save(op.join(p.work_dir, 'stcs', 'nlr%.0f_contrast_%s' % (s + 1, contrast)))
if do_contrasts:
if not op.exists(work_dir + '/stcs'):
os.mkdir(work_dir + '/stcs')
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
for cont in contrasts:
contrast = '-'.join([conditions[c] for c in cont[::-1]])
print(' Running t-tests for %s' % contrast)
X = (np.abs(data[cont[1]]) - np.abs(data[cont[0]]))
stc = mne.SourceEstimate(stat_fun(X), fs_verts, times[0],
np.diff(times[:2]))
stc.save(op.join(work_dir, 'stcs', 'contrast_%s' % contrast))
# Compute spatiotemporal RM-ANOVA (one-way) and visualize results on surface
if do_anova:
for cont in contrasts:
t0 = time.time()
contrast = '-'.join([conditions[c] for c in cont[::-1]])
tt = '%s-%s' % (tmin, tmax)
print(' Running spatiotemporal RM-ANOVA for %s in the interval %s ms' % (contrast, tt))
mask = np.logical_and(times >= tmin, times <= tmax)
X = np.swapaxes(np.swapaxes(data[cont], 0, 1), 2, 3)[:, :, mask, :]
X = np.reshape(X, (X.shape[0], 2 * X.shape[2], X.shape[3]))
X = np.abs(X)
tvals, pvals, dof = anova_time(X)
        d = np.sign(tvals) * -np.log10(np.minimum(np.abs(pvals), 1))  # for Bonferroni correction over the 20484 fsaverage vertices use -np.log10(np.minimum(np.abs(pvals) * 20484, 1))
d[fs_medial] = 0
stc_anova = mne.SourceEstimate(d, fs_verts, 0, 1e-3, 'fsaverage')
stc_anova.save(op.join(work_dir, 'stcs', 'anova_%s_%s' % (contrast, tt)))
fmin, fmid, fmax = 2, 4, 6
colormap = mne.viz.mne_analyze_colormap(limits=[fmin, fmid, fmax])
brain = stc_anova.plot(hemi='split', colormap=colormap, time_label=None,
smoothing_steps=n_smooth, transparent=True, config_opts={},
views=['lat', 'med'])
brain.save_image(op.join(work_dir, 'stcs', 'anova_%s_%s.png'
% (contrast, tt)))
print(' Time: %s' % round((time.time() - t0) / 60., 2))
brain.close()
| bsd-3-clause |
justincassidy/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
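# Illustrative aside (not part of the original example): up to permutation,
# sign and scale, the mixing matrix estimated by FastICA should resemble the
# true mixing matrix A defined above. Column-normalising both makes the
# comparison easier to read; this is only a rough sanity check.
print("True mixing matrix, column-normalised:")
print(A / np.abs(A).max(axis=0))
print("FastICA estimate, column-normalised (possibly permuted / sign-flipped):")
print(ica.mixing_ / np.abs(ica.mixing_).max(axis=0))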
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
hagabbar/pycbc_copy | setup.py | 1 | 20651 | #!/usr/bin/env python
# Copyright (C) 2012 Alex Nitz, Duncan Brown, Andrew Miller, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
setup.py file for PyCBC package
"""
from __future__ import print_function
import os, fnmatch, sys, subprocess, shutil
# FIXME: trace.fullmodname was undocumented in Python 2 and actually became an
# internal function in Python 3. We should not depend on it.
try:
from trace import fullmodname
except ImportError:
from trace import _fullmodname as fullmodname
try:
from setuptools.command.install import install as _install
from setuptools.command.install_egg_info import install_egg_info as egg_info
USE_SETUPTOOLS = True
except:
from distutils.command.install import install as _install
USE_SETUPTOOLS = False
from distutils.errors import DistutilsError
from distutils.core import setup, Command, Extension
from distutils.command.clean import clean as _clean
from distutils.file_util import write_file
from distutils.version import LooseVersion
try:
import numpy.version
if LooseVersion(numpy.version.version) < LooseVersion("1.6.4"):
print(" Numpy >= 1.6.4 is required for pycbc dependencies. \n"
" We found version %s already installed. Please update \n"
" to a more recent version and then retry PyCBC \n"
" installation. \n"
" \n"
" Using pip: [pip install 'numpy>=1.6.4' --upgrade --user] \n"
"" % numpy.version.version)
exit(1)
except ImportError:
pass
requires = ['lal.lal', 'lalsimulation.lalsimulation', 'glue']
setup_requires = []
install_requires = setup_requires + ['Mako>=1.0.1',
'argparse>=1.3.0',
'decorator>=3.4.2',
'scipy>=0.13.0',
'weave>=0.16.0',
'unittest2',
'matplotlib>=1.3.1',
'numpy>=1.9.0',
'pillow',
'h5py>=2.5',
'jinja2',
'mpld3>=0.3',
'pyRXP>=2.1.0',
'pycbc-glue-obsolete==1.1.0',
'kombine>=0.8.2',
'emcee==2.2.1',
'corner>=2.0.1',
'requests>=1.2.1',
'beautifulsoup4>=4.6.0',
'astropy>=2.0.1'
]
#FIXME Remove me when we bump to h5py > 2.5
try:
import h5py
except ImportError:
setup_requires.append('cython')
else:
import h5py.version
if h5py.version.version < '2.5':
setup_requires.append('cython')
def find_package_data(dirname):
def find_paths(dirname):
items = []
for fname in os.listdir(dirname):
path = os.path.join(dirname, fname)
if os.path.isdir(path):
items += find_paths(path)
elif not path.endswith(".py") and not path.endswith(".pyc"):
items.append(path)
return items
items = find_paths(dirname)
return [os.path.relpath(path, dirname) for path in items]
# Add swig-generated files to the list of things to clean, so they
# get regenerated each time.
class clean(_clean):
def finalize_options (self):
_clean.finalize_options(self)
self.clean_files = []
self.clean_folders = ['docs/_build']
def run(self):
_clean.run(self)
for f in self.clean_files:
try:
os.unlink(f)
print('removed {0}'.format(f))
except:
pass
for fol in self.clean_folders:
shutil.rmtree(fol, ignore_errors=True)
print('removed {0}'.format(fol))
class install(_install):
def run(self):
etcdirectory = os.path.join(self.install_data, 'etc')
if not os.path.exists(etcdirectory):
os.makedirs(etcdirectory)
filename = os.path.join(etcdirectory, 'pycbc-user-env.sh')
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
env_file = open(filename, 'w')
print("# Source this file to access PyCBC", file=env_file)
print("PATH=" + self.install_scripts + ":$PATH", file=env_file)
print("PYTHONPATH=" + self.install_libbase + ":$PYTHONPATH",
file=env_file)
print("export PYTHONPATH", file=env_file)
print("export PATH", file=env_file)
env_file.close()
_install.run(self)
def do_setup(*args):
return True
_install._called_from_setup=do_setup
test_results = []
# Run all of the testing scripts
class TestBase(Command):
user_options = []
test_modules = []
def initialize_options(self):
self.scheme = None
self.build_dir = None
def finalize_options(self):
#Populate the needed variables
self.set_undefined_options('build',('build_lib', 'build_dir'))
def find_test_modules(self,pattern):
# Find all the unittests that match a given string pattern
modules= []
for path, dirs, files in os.walk("test"):
for filename in fnmatch.filter(files, pattern):
#add the test directories to the path
sys.path.append(os.path.join(path))
#save the module name for importing
modules.append(fullmodname(filename))
return modules
def run(self):
self.run_command('build')
# Get the list of cpu test modules
self.test_modules = self.find_test_modules("test*.py")
# Run from the build directory
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = self.build_dir + ":" + os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = self.build_dir
test_results.append("\n" + (self.scheme + " tests ").rjust(30))
for test in self.test_modules:
test_command = [sys.executable,
'test/' + test + '.py',
'-s', self.scheme]
a = subprocess.call(test_command, env=os.environ)
if a != 0:
result_str = str(test).ljust(30) + ": Fail : " + str(a)
else:
result_str = str(test).ljust(30) + ": Pass"
test_results.append(result_str)
for test in test_results:
print(test)
class test(Command):
def has_cuda(self):
import pycbc
return pycbc.HAVE_CUDA
sub_commands = [('test_cpu',None),('test_cuda',has_cuda)]
user_options = []
description = "run the available tests for all compute schemes (cpu, cuda)"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
class test_cpu(TestBase):
description = "run all CPU tests"
def initialize_options(self):
TestBase.initialize_options(self)
self.scheme = 'cpu'
class test_cuda(TestBase):
description = "run CUDA tests"
def initialize_options(self):
TestBase.initialize_options(self)
self.scheme = 'cuda'
# write versioning info
def get_version_info():
"""Get VCS info and write version info to version.py
"""
from pycbc import _version_helper
    # If this is a pycbc git repo always populate version information using GIT
try:
vcs_info = _version_helper.generate_git_version_info()
with open('pycbc/version.py', 'w') as f:
f.write("# coding: utf-8\n")
f.write("# Generated by setup.py for PyCBC on %s.\n\n"
% vcs_info.build_date)
# print general info
f.write('version = \'%s\'\n' % vcs_info.version)
f.write('date = \'%s\'\n' % vcs_info.date)
f.write('release = %s\n' % vcs_info.release)
f.write('last_release = \'%s\'\n' % vcs_info.last_release)
# print git info
f.write('\ngit_hash = \'%s\'\n' % vcs_info.hash)
f.write('git_branch = \'%s\'\n' % vcs_info.branch)
f.write('git_tag = \'%s\'\n' % vcs_info.tag)
f.write('git_author = \'%s\'\n' % vcs_info.author)
f.write('git_committer = \'%s\'\n' % vcs_info.committer)
f.write('git_status = \'%s\'\n' % vcs_info.status)
f.write('git_builder = \'%s\'\n' % vcs_info.builder)
f.write('git_build_date = \'%s\'\n' % vcs_info.build_date)
f.write('git_verbose_msg = """Branch: %s\n'
'Tag: %s\n'
'Id: %s\n'
'Builder: %s\n'
'Build date: %s\n'
'Repository status is %s"""\n' %(vcs_info.branch,
vcs_info.tag,
vcs_info.hash,
vcs_info.builder,
vcs_info.build_date,
vcs_info.status))
f.write('from pycbc._version import *\n')
version = vcs_info.version
# If this is a release or another kind of source distribution of PyCBC
except:
version = '1.9.1dev'
release = 'False'
date = hash = branch = tag = author = committer = status = builder = build_date = ''
with open('pycbc/version.py', 'w') as f:
f.write("# Generated by setup.py for PyCBC.\n\n")
            # print general info
f.write('version = \'%s\'\n' % version)
f.write('date = \'%s\'\n' % date)
f.write('release = %s\n' % release)
# print git info
f.write('\ngit_hash = \'%s\'\n' % hash)
f.write('git_branch = \'%s\'\n' % branch)
f.write('git_tag = \'%s\'\n' % tag)
f.write('git_author = \'%s\'\n' % author)
f.write('git_committer = \'%s\'\n' % committer)
f.write('git_status = \'%s\'\n' % status)
f.write('git_builder = \'%s\'\n' % builder)
f.write('git_build_date = \'%s\'\n' % build_date)
f.write('git_verbose_msg = """Version: %s Release: %s \n'
' """\n' % (version, release))
f.write('from pycbc._version import *\n')
from pycbc import version
version = version.version
return version
class build_docs(Command):
user_options = []
description = "Build the documentation pages"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.check_call("cd docs; cp Makefile.std Makefile; cp conf_std.py conf.py; sphinx-apidoc "
" -o ./ -f -A 'PyCBC dev team' -V '0.1' ../pycbc && make html",
stderr=subprocess.STDOUT, shell=True)
class build_gh_pages(Command):
user_options = []
description = "Build the documentation pages for GitHub"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.check_call("mkdir -p _gh-pages/latest && touch _gh-pages/.nojekyll && "
"cd docs; cp Makefile.gh_pages Makefile; cp conf_std.py conf.py; sphinx-apidoc "
" -o ./ -f -A 'PyCBC dev team' -V '0.1' ../pycbc && make html",
stderr=subprocess.STDOUT, shell=True)
cmdclass = { 'test' : test,
'build_docs' : build_docs,
'build_gh_pages' : build_gh_pages,
'install' : install,
'test_cpu':test_cpu,
'test_cuda':test_cuda,
'clean' : clean,
}
extras_require = {'cuda': ['pycuda>=2015.1', 'scikit-cuda']}
# do the actual work of building the package
VERSION = get_version_info()
setup (
name = 'PyCBC',
version = VERSION,
description = 'Analyze gravitational-wave data, find signals, and study their parameters.',
long_description = open('descr.rst').read(),
author = 'Ligo Virgo Collaboration - PyCBC team',
author_email = '[email protected]',
url = 'https://ligo-cbc.github.io',
download_url = 'https://github.com/ligo-cbc/pycbc/tarball/v%s' % VERSION,
keywords = ['ligo', 'physics', 'gravity', 'signal processing', 'gravitational waves'],
cmdclass = cmdclass,
setup_requires = setup_requires,
extras_require = extras_require,
install_requires = install_requires,
scripts = [
'bin/minifollowups/pycbc_injection_minifollowup',
'bin/minifollowups/pycbc_foreground_minifollowup',
'bin/minifollowups/pycbc_sngl_minifollowup',
'bin/minifollowups/pycbc_single_template_plot',
'bin/minifollowups/pycbc_plot_chigram',
'bin/minifollowups/pycbc_page_coincinfo',
'bin/minifollowups/pycbc_page_injinfo',
'bin/minifollowups/pycbc_page_snglinfo',
'bin/minifollowups/pycbc_plot_trigger_timeseries',
'bin/pycbc_banksim',
'bin/pycbc_banksim_skymax',
'bin/pycbc_banksim_combine_banks',
'bin/pycbc_banksim_match_combine',
'bin/pycbc_faithsim',
'bin/pycbc_inspiral',
'bin/pycbc_inspiral_skymax',
'bin/pycbc_live',
'bin/pycbc_live_nagios_monitor',
'bin/pycbc_single_template',
'bin/pycbc_multi_inspiral',
'bin/pycbc_make_banksim',
'bin/pycbc_splitbank',
'bin/pycbc_hdf5_splitbank',
'bin/pycbc_split_inspinj',
'bin/bank/pycbc_brute_bank',
'bin/bank/pycbc_geom_aligned_2dstack',
'bin/bank/pycbc_geom_aligned_bank',
'bin/bank/pycbc_geom_nonspinbank',
'bin/bank/pycbc_aligned_bank_cat',
'bin/bank/pycbc_aligned_stoch_bank',
'bin/bank/pycbc_coinc_bank2hdf',
'bin/bank/pycbc_tmpltbank_to_chi_params',
'bin/bank/pycbc_bank_verification',
'bin/pycbc_make_faithsim',
'bin/pycbc_get_ffinal',
'bin/pycbc_inj_cut',
'bin/pycbc_upload_xml_to_gracedb',
'bin/pycbc_dark_vs_bright_injections',
'bin/pycbc_make_html_page',
'bin/pycbc_optimal_snr',
'bin/pycbc_fit_sngl_trigs',
'bin/pycbc_randomize_inj_dist_by_optsnr',
'bin/pycbc_create_injections',
'bin/hdfcoinc/pycbc_calculate_psd',
'bin/hdfcoinc/pycbc_average_psd',
'bin/hdfcoinc/pycbc_coinc_mergetrigs',
'bin/hdfcoinc/pycbc_coinc_findtrigs',
'bin/hdfcoinc/pycbc_coinc_statmap',
'bin/hdfcoinc/pycbc_coinc_statmap_inj',
'bin/hdfcoinc/pycbc_page_foreground',
'bin/hdfcoinc/pycbc_page_foundmissed',
'bin/hdfcoinc/pycbc_page_ifar',
'bin/hdfcoinc/pycbc_page_snrifar',
'bin/hdfcoinc/pycbc_page_snrratehist',
'bin/hdfcoinc/pycbc_page_sensitivity',
'bin/hdfcoinc/pycbc_page_banktriggerrate',
'bin/hdfcoinc/pycbc_coinc_hdfinjfind',
'bin/hdfcoinc/pycbc_page_snrchi',
'bin/hdfcoinc/pycbc_page_segments',
'bin/hdfcoinc/pycbc_page_segtable',
'bin/hdfcoinc/pycbc_page_segplot',
'bin/hdfcoinc/pycbc_page_vetotable',
'bin/hdfcoinc/pycbc_plot_psd_file',
'bin/hdfcoinc/pycbc_plot_psd_timefreq',
'bin/hdfcoinc/pycbc_plot_range',
'bin/hdfcoinc/pycbc_foreground_censor',
'bin/hdfcoinc/pycbc_plot_hist',
'bin/hdfcoinc/pycbc_page_recovery',
'bin/hdfcoinc/pycbc_page_injtable',
'bin/hdfcoinc/pycbc_strip_injections',
'bin/hdfcoinc/pycbc_page_coinc_snrchi',
'bin/hdfcoinc/pycbc_distribute_background_bins',
'bin/hdfcoinc/pycbc_combine_statmap',
'bin/hdfcoinc/pycbc_stat_dtphase',
'bin/hdfcoinc/pycbc_plot_singles_vs_params',
'bin/hdfcoinc/pycbc_plot_singles_timefreq',
'bin/hdfcoinc/pycbc_plot_throughput',
'bin/hdfcoinc/pycbc_plot_background_coincs',
'bin/hdfcoinc/pycbc_plot_bank_bins',
'bin/hdfcoinc/pycbc_merge_psds',
'bin/hdfcoinc/pycbc_plot_gating',
'bin/hdfcoinc/pycbc_fit_sngls_by_template',
'bin/hdfcoinc/pycbc_fit_sngls_over_param',
'bin/hdfcoinc/pycbc_fit_sngls_binned',
'bin/hdfcoinc/pycbc_template_recovery_hist',
'bin/hwinj/pycbc_generate_hwinj',
'bin/hwinj/pycbc_generate_hwinj_from_xml',
'bin/hwinj/pycbc_plot_hwinj',
'bin/hwinj/pycbc_insert_frame_hwinj',
'bin/pycbc_submit_dax',
'bin/mvsc/pycbc_mvsc_get_features',
'bin/pycbc_coinc_time',
'bin/pygrb/pycbc_make_offline_grb_workflow',
'bin/pygrb/pycbc_make_grb_summary_page',
'bin/pycbc_condition_strain',
'bin/workflows/pycbc_make_inference_workflow',
'bin/inference/pycbc_inference',
'bin/inference/pycbc_inference_extract_samples',
'bin/inference/pycbc_inference_plot_acceptance_rate',
'bin/inference/pycbc_inference_plot_acf',
'bin/inference/pycbc_inference_plot_acl',
'bin/inference/pycbc_inference_plot_geweke',
'bin/inference/pycbc_inference_plot_gelman_rubin',
'bin/inference/pycbc_inference_plot_inj_recovery',
'bin/inference/pycbc_inference_plot_movie',
'bin/inference/pycbc_inference_plot_inj_intervals',
'bin/inference/pycbc_inference_plot_posterior',
'bin/inference/pycbc_inference_plot_prior',
'bin/inference/pycbc_inference_plot_samples',
'bin/inference/pycbc_inference_table_summary',
'bin/plotting/pycbc_plot_waveform',
'bin/plotting/pycbc_banksim_plot_eff_fitting_factor',
'bin/plotting/pycbc_banksim_table_point_injs',
'bin/plotting/pycbc_banksim_plot_fitting_factors',
'bin/workflows/pycbc_create_sbank_workflow',
'bin/workflows/pycbc_create_uberbank_workflow',
'bin/workflows/pycbc_make_coinc_search_workflow',
'bin/workflows/pycbc_make_psd_estimation_workflow',
'bin/workflows/pycbc_create_bank_verifier_workflow',
'bin/pycbc_compress_bank',
'bin/pycbc_ringinj',
'tools/einsteinathome/pycbc_build_eah.sh'
],
packages = [
'pycbc',
'pycbc.calibration',
'pycbc.distributions',
'pycbc.fft',
'pycbc.types',
'pycbc.filter',
'pycbc.psd',
'pycbc.waveform',
'pycbc.events',
'pycbc.noise',
'pycbc.vetoes',
'pycbc.tmpltbank',
'pycbc.workflow',
'pycbc.results',
'pycbc.io',
'pycbc.inference',
'pycbc.inject',
'pycbc.frame',
'pycbc.catalog',
],
package_data = {'pycbc.workflow': find_package_data('pycbc/workflow'),
'pycbc.results': find_package_data('pycbc/results'),
'pycbc.tmpltbank': find_package_data('pycbc/tmpltbank')},
)
| gpl-3.0 |
timothydmorton/transit-fitting | setup.py | 2 | 1295 | from setuptools import setup, find_packages
import os,sys
def readme():
with open('README.md') as f:
return f.read()
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__TRANSITFIT_SETUP__ = True
import transitfit
version = transitfit.__version__
# Publish the library to PyPI.
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist upload")
sys.exit()
# Push a new tag to GitHub.
if "tag" in sys.argv:
os.system("git tag -a {0} -m 'version {0}'".format(version))
os.system("git push --tags")
sys.exit()
setup(name = "transitfit",
version = version,
description = "Pythonic fitting of transits.",
long_description = readme(),
author = "Timothy D. Morton",
author_email = "[email protected]",
url = "https://github.com/timothydmorton/transit-fitting",
packages = find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['pandas>=0.14','emcee>=2',
'kplr', 'transit', 'triangle_plot'],
zip_safe=False
)
| mit |
ljschumacher/tierpsy-tracker | tierpsy/analysis/blob_feats/getBlobsFeats.py | 1 | 6209 | import json
import os
import cv2
import numpy as np
import pandas as pd
import tables
from tierpsy.analysis.ske_create.getSkeletonsTables import getWormMask
from tierpsy.analysis.ske_create.helperIterROI import generateMoviesROI
from tierpsy.helper.misc import TABLE_FILTERS
def _getBlobFeatures(blob_cnt, blob_mask, roi_image, roi_corner):
if blob_cnt.size > 0:
area = float(cv2.contourArea(blob_cnt))
        # use a rotated bounding box (cv2.minAreaRect); the fitEllipse function
        # produces bad results quite often. This method gives a better estimate
        # of the worm length than the eccentricity does.
(CMx, CMy), (L, W), angle = cv2.minAreaRect(blob_cnt)
#adjust CM from the ROI reference frame to the image reference
CMx += roi_corner[0]
CMy += roi_corner[1]
if L == 0 or W == 0:
return None #something went wrong abort
if W > L:
L, W = W, L # switch if width is larger than length
quirkiness = np.sqrt(1 - W**2 / L**2)
hull = cv2.convexHull(blob_cnt) # for the solidity
solidity = area / cv2.contourArea(hull)
perimeter = float(cv2.arcLength(blob_cnt, True))
compactness = 4 * np.pi * area / (perimeter**2)
# calculate the mean intensity of the worm
intensity_mean, intensity_std = cv2.meanStdDev(roi_image, mask=blob_mask)
intensity_mean = intensity_mean[0,0]
intensity_std = intensity_std[0,0]
# calculate hu moments, they are scale and rotation invariant
hu_moments = cv2.HuMoments(cv2.moments(blob_cnt))
# save everything into the the proper output format
mask_feats = (CMx,
CMy,
area,
perimeter,
L,
W,
quirkiness,
compactness,
angle,
solidity,
intensity_mean,
intensity_std,
*hu_moments.flatten())
else:
return tuple([np.nan]*19)
return mask_feats
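# Illustrative helper (an aside for this sketch, not used by the pipeline):
# on a synthetic filled circle the shape descriptors computed above behave as
# expected, i.e. compactness = 4*pi*area/perimeter**2 is close to 1 and the
# bounding-box based quirkiness sqrt(1 - W**2/L**2) is close to 0.
def _illustrate_blob_shape_descriptors(radius=50):
    img = np.zeros((4 * radius, 4 * radius), dtype=np.uint8)
    cv2.circle(img, (2 * radius, 2 * radius), radius, 255, -1)
    # cv2.findContours returns 2 or 3 values depending on the OpenCV version
    ret = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnt = ret[0][0] if len(ret) == 2 else ret[1][0]
    area = cv2.contourArea(cnt)
    perimeter = cv2.arcLength(cnt, True)
    compactness = 4 * np.pi * area / perimeter ** 2
    (_, _), (box_l, box_w), _ = cv2.minAreaRect(cnt)
    if box_w > box_l:
        box_l, box_w = box_w, box_l
    quirkiness = np.sqrt(1 - box_w ** 2 / box_l ** 2)
    return compactness, quirkiness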
def getBlobsFeats(skeletons_file, masked_image_file, strel_size):
# extract the base name from the masked_image_file. This is used in the
# progress status.
base_name = masked_image_file.rpartition('.')[0].rpartition(os.sep)[-1]
progress_prefix = base_name + ' Calculating individual blobs features.'
#read trajectories data with pandas
with pd.HDFStore(skeletons_file, 'r') as ske_file_id:
trajectories_data = ske_file_id['/trajectories_data']
with tables.File(skeletons_file, 'r') as ske_file_id:
dd = ske_file_id.get_node('/trajectories_data')
is_light_background = dd._v_attrs['is_light_background']
expected_fps = dd._v_attrs['expected_fps']
bgnd_param = dd._v_attrs['bgnd_param']
bgnd_param = json.loads(bgnd_param.decode("utf-8"))
#get generators to get the ROI for each frame
ROIs_generator = generateMoviesROI(masked_image_file,
trajectories_data,
progress_prefix = progress_prefix)
def _gen_rows_blocks():
block_size = 1000
        # use rows for the ROIs_generator, this should balance the data in a given thread
block = []
for roi_dicts in ROIs_generator:
for irow, (roi_image, roi_corner) in roi_dicts.items():
block.append((irow, (roi_image, roi_corner)))
if len(block) == block_size:
yield block
block = []
if len(block) > 0:
yield block
def _roi2feats(block):
        # from a block of (row index, ROI) pairs, compute the blob features of each row
output= []
for irow, (roi_image, roi_corner) in block:
row_data = trajectories_data.loc[irow]
blob_mask, blob_cnt, _ = getWormMask(roi_image,
row_data['threshold'],
strel_size,
min_blob_area=row_data['area'] / 2,
is_light_background = is_light_background)
feats = _getBlobFeatures(blob_cnt, blob_mask, roi_image, roi_corner)
output.append((irow, feats))
return output
# initialize output data as a numpy recarray (pytables friendly format)
feats_names = ['coord_x', 'coord_y', 'area', 'perimeter',
'box_length', 'box_width', 'quirkiness', 'compactness',
'box_orientation', 'solidity', 'intensity_mean', 'intensity_std',
'hu0', 'hu1', 'hu2', 'hu3', 'hu4', 'hu5', 'hu6']
features_df = np.recarray(len(trajectories_data),
dtype = [(x, np.float32) for x in feats_names])
feats_generator = map(_roi2feats, _gen_rows_blocks())
for block in feats_generator:
for irow, row_dat in block:
features_df[irow] = row_dat
with tables.File(skeletons_file, 'r+') as fid:
if '/blob_features' in fid:
fid.remove_node('/blob_features')
fid.create_table(
'/',
'blob_features',
obj=features_df,
filters=TABLE_FILTERS)
assert all(x in feats_names for x in fid.get_node('/blob_features').colnames)
if __name__ == '__main__':
#masked_image_file = '/Volumes/behavgenom_archive$/Avelino/Worm_Rig_Tests/short_movies/MaskedVideos/double_pick_021216/N2_N6_Set4_Pos5_Ch5_02122016_160343.hdf5'
masked_image_file = '/Volumes/behavgenom_archive$/Serena/MaskedVideos/recording 29.9 green 100-200/recording 29.9 green_X1.hdf5'
skeletons_file = masked_image_file.replace('MaskedVideos', 'Results').replace('.hdf5', '_skeletons.hdf5')
is_light_background = True
strel_size = 5
    getBlobsFeats(skeletons_file, masked_image_file, strel_size) | mit |
andreadelprete/pinocchio_inv_dyn | python/pinocchio_inv_dyn/geom_utils.py | 1 | 6063 | #from polytope_conversion_utils import *
from numpy import zeros, sqrt, array, vstack
import numpy as np
#from math import cos, sin, tan, atan, pi
import matplotlib.pyplot as plt
#import cdd
import plot_utils as plut
#from polytope_conversion_utils import poly_face_to_span, NotPolyFace
NUMBER_TYPE = 'float' # 'float' or 'fraction'
''' Compute the projection matrix of the cross product.
'''
def crossMatrix( v ):
VP = np.array( [[ 0, -v[2], v[1] ],
[ v[2], 0, -v[0] ],
[-v[1], v[0], 0 ]] );
return VP;
''' Check whether v is inside a 3d cone with the specified normal direction
and friction coefficient.
'''
def is_vector_inside_cone(v, mu, n):
P = np.eye(3) - np.outer(n, n);
return (np.linalg.norm(np.dot(P,v)) - mu*np.dot(n,v)<=0.0);
''' Find the intersection between two lines:
a1^T x = b1
a2^T x = b2
'''
def find_intersection(a1, b1, a2, b2):
x = np.zeros(2);
den = (a1[0]*a2[1] - a2[0]*a1[1]);
if(abs(den)<1e-6):
print "ERROR: Impossible to find intersection between two lines that are parallel";
return x;
if(np.abs(a1[0])>np.abs(a2[0])):
x[1] = (-a2[0]*b1 + a1[0]*b2)/den;
x[0] = (b1-a1[1]*x[1])/a1[0];
else:
x[1] = (-a2[0]*b1 + a1[0]*b2)/den;
x[0] = (b2-a2[1]*x[1])/a2[0];
return x;
''' Find the line passing through two points:
a^T x1 + b = 0
a^T x2 + b = 0
'''
def find_line(x1, x2):
den = (x1[0]*x2[1] - x2[0]*x1[1]);
if(abs(den)<1e-4):
# print "ERROR: x1 and x2 are too close, x1=(%f,%f), x2=(%f,%f)" % (x1[0],x1[1],x2[0],x2[1]);
return (zeros(2),-1);
# a = np.array([-(x1[1] - x2[1])/den, -(x2[0] - x1[0])/den]);
# a_norm = np.linalg.norm(a);
# a /= a_norm;
# b = -1.0/a_norm;
a = np.array([x2[1]-x1[1], x1[0]-x2[0]]);
a /= np.linalg.norm(a);
b = -a[0]*x1[0] - a[1]*x1[1];
# print "a=(%f,%f), a2=(%f,%f), b=%f, b2=%f" % (a[0],a[1],a2[0],a2[1],b,b2);
return (a,b);
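''' Illustrative round-trip (an aside, not used elsewhere in this module):
    find_line returns (a, b) such that a^T x + b = 0, while find_intersection
    expects lines written as a^T x = b, hence the sign flip on b below.
'''
def _check_line_helpers():
    a1, b1 = find_line(np.array([0., 0.]), np.array([1., 1.]));   # the line y = x
    a2, b2 = find_line(np.array([0., 1.]), np.array([1., 0.]));   # the line y = 1 - x
    return find_intersection(a1, -b1, a2, -b2);                   # expected: (0.5, 0.5)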
''' Compute the area of a 2d triangle with vertices a, b and c.
'''
def compute_triangle_area(a, b, c):
la = np.linalg.norm(a-b);
lb = np.linalg.norm(b-c);
lc = np.linalg.norm(c-a);
s = 0.5*(la+lb+lc);
return sqrt(s*(s-la)*(s-lb)*(s-lc));
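''' Worked example (illustrative only): the right triangle with vertices
    (0,0), (1,0) and (0,1) has la=1, lb=sqrt(2), lc=1, s=(2+sqrt(2))/2, and
    Heron's formula gives an area of 0.5, i.e.
    compute_triangle_area(np.array([0.,0.]), np.array([1.,0.]), np.array([0.,1.])) == 0.5
'''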
''' Plot inequalities A*x<=b on x-y plane.
'''
def plot_inequalities(A, b, x_bounds, y_bounds, ls='--', color='k', ax=None, lw=8):
if(A.shape[1]!=2):
print "[ERROR in plot_inequalities] matrix does not have 2 columns";
return;
# if(A.shape[0]!=len(b)):
# print "[ERROR in plot_inequalities] matrix and vector does not have the same number of rows";
# return;
if(ax==None):
f, ax = plut.create_empty_figure();
p = np.zeros(2); # p height
p_x = np.zeros(2);
p_y = np.zeros(2);
for i in range(A.shape[0]):
if(np.abs(A[i,1])>1e-13):
p_x[0] = x_bounds[0]; # p x coordinate
p_x[1] = x_bounds[1]; # p x coordinate
p[0] = p_x[0];
p[1] = 0;
p_y[0] = (b[i] - np.dot(A[i,:],p) )/A[i,1];
p[0] = p_x[1];
p_y[1] = (b[i] - np.dot(A[i,:],p) )/A[i,1];
ax.plot(p_x, p_y, ls=ls, color=color, linewidth=lw);
elif(np.abs(A[i,0])>1e-13):
p_y[0] = y_bounds[0];
p_y[1] = y_bounds[1];
p[0] = 0;
p[1] = p_y[0];
p_x[0] = (b[i] - np.dot(A[i,:],p) )/A[i,0];
p[1] = p_y[1];
p_x[1] = (b[i] - np.dot(A[i,:],p) )/A[i,0];
ax.plot(p_x, p_y, ls=ls, color=color, linewidth=lw);
else:
pass;
# print "[WARNING] Could not print one inequality as all coefficients are 0: A[%d,:]=[%f,%f]" % (i,A[i,0],A[i,1]);
ax.set_xlim(x_bounds)
ax.set_ylim(y_bounds)
return ax;
''' Plot the polytope A*x<=b with vectices V '''
def plot_polytope(A, b, V=None, color='b', ax=None, plotLines=True, lw=4):
A = np.asarray(A);
b = np.asarray(b);
if(ax==None):
f, ax = plut.create_empty_figure();
    if(V is None):
try:
from polytope_conversion_utils import poly_face_to_span, NotPolyFace
V = poly_face_to_span(A,b).T;
except (ValueError,NotPolyFace) as e:
print "WARNING: "+str(e);
    if(V is None):
X_MIN = -1.;
X_MAX = 1.;
Y_MIN = -1.;
Y_MAX = 1.;
else:
X_MIN = np.min(V[:,0]);
X_MAX = np.max(V[:,0]);
X_MIN -= 0.1*(X_MAX-X_MIN);
X_MAX += 0.1*(X_MAX-X_MIN);
Y_MIN = np.min(V[:,1]);
Y_MAX = np.max(V[:,1]);
Y_MIN -= 0.1*(Y_MAX-Y_MIN);
Y_MAX += 0.1*(Y_MAX-Y_MIN);
if(plotLines):
plot_inequalities(A, b, [X_MIN,X_MAX], [Y_MIN,Y_MAX], color=color, ls='--', ax=ax, lw=lw);
n = b.shape[0];
if(n<2):
return (ax,None);
line = None;
    if(V is not None):
xx = np.zeros(2);
yy = np.zeros(2);
for i in range(n):
xx[0] = V[i,0];
xx[1] = V[(i+1)%n,0];
yy[0] = V[i,1];
yy[1] = V[(i+1)%n,1];
line, = ax.plot(xx, yy, color='r', ls='-', markersize=30); #, lw=2*lw);
ax.set_xlim([X_MIN, X_MAX]);
ax.set_ylim([Y_MIN, Y_MAX]);
return (ax, line);
def compute_convex_hull(S):
"""
Returns the matrix A and the vector b such that:
{x = S z, sum z = 1, z>=0} if and only if {A x + b >= 0}.
"""
import cdd
V = np.hstack([np.ones((S.shape[1], 1)), S.T])
# V-representation: first column is 0 for rays, 1 for vertices
V_cdd = cdd.Matrix(V, number_type=NUMBER_TYPE)
V_cdd.rep_type = cdd.RepType.GENERATOR
P = cdd.Polyhedron(V_cdd)
H = np.array(P.get_inequalities())
b, A = H[:, 0], H[:, 1:]
return (A,b) | gpl-2.0 |
arbuz001/sms-tools | workspace/A6/A6Part2.py | 2 | 12166 | import os
import sys
import numpy as np
import math
from scipy.signal import get_window
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import utilFunctions as UF
import harmonicModel as HM
import stft
eps = np.finfo(float).eps
#sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../A2/'))
#import A2Part1
"""
A6Part2 - Segmentation of stable note regions in an audio signal
Complete the function segmentStableNotesRegions() to identify the stable regions of notes in a specific
monophonic audio signal. The function returns an array of segments where each segment contains the
starting and the ending frame index of a stable note.
The input argument to the function are the wav file name including the path (inputFile), threshold to
be used for deciding stable notes (stdThsld) in cents, minimum allowed duration of a stable note (minNoteDur),
number of samples to be considered for computing standard deviation (winStable), analysis window (window),
window size (M), FFT size (N), hop size (H), error threshold used in the f0 detection (f0et), magnitude
threshold for spectral peak picking (t), minimum allowed f0 (minf0) and maximum allowed f0 (maxf0).
The function returns a numpy array of shape (k,2), where k is the total number of detected segments.
The two columns in each row contains the starting and the ending frame indexes of a stable note segment.
The segments must be returned in the increasing order of their start times.
In order to facilitate the assignment we have configured the input parameters to work with a particular
sound, '../../sounds/sax-phrase-short.wav'. The code and parameters to estimate the fundamental frequency
is completed. Thus you start from an f0 curve obtained using the f0Detection() function and you will use
that to obtain the note segments.
All the steps to be implemented in order to solve this question are indicated in segmentStableNotesRegions()
as comments. These are the steps:
1. In order to make the processing musically relevant, the f0 values should be converted first from
Hertz to Cents, which is a logarithmic scale.
2. At each time frame (for each f0 value) you should compute the standard deviation of the past winStable
number of f0 samples (including the f0 sample at the current audio frame).
3. You should then apply a deviation threshold, stdThsld, to determine if the current frame belongs
to a stable note region or not. Since we are interested in the stable note regions, the standard
deviation of the previous winStable number of f0 samples (including the current sample) should be less
than stdThsld i.e. use the current sample and winStable-1 previous samples. Ignore the first winStable-1
samples in this computation.
4. All the consecutive frames belonging to the stable note regions should be grouped together into
segments. For example, if the indexes of the frames corresponding to the stable note regions are
3,4,5,6,12,13,14, we get two segments, first 3-6 and second 12-14.
5. After grouping frame indexes into segments filter/remove the segments which are smaller in duration
than minNoteDur. Return the segment indexes in the increasing order of their start frame index.
Test case 1: Using inputFile='../../sounds/cello-phrase.wav', stdThsld=10, minNoteDur=0.1,
winStable = 3, window='hamming', M=1025, N=2048, H=256, f0et=5.0, t=-100, minf0=310, maxf0=650,
the function segmentStableNotesRegions() should return 9 segments. Please use loadTestcases.load()
to check the expected segment indexes in the output.
Test case 2: Using inputFile='../../sounds/cello-phrase.wav', stdThsld=20, minNoteDur=0.5,
winStable = 3, window='hamming', M=1025, N=2048, H=256, f0et=5.0, t=-100, minf0=310, maxf0=650,
the function segmentStableNotesRegions() should return 6 segments. Please use loadTestcases.load()
to check the expected segment indexes in the output.
Test case 3: Using inputFile='../../sounds/sax-phrase-short.wav', stdThsld=5, minNoteDur=0.6,
winStable = 3, window='hamming', M=1025, N=2048, H=256, f0et=5.0, t=-100, minf0=310, maxf0=650,
the function segmentStableNotesRegions() should return just one segment. Please use loadTestcases.load()
to check the expected segment indexes in the output.
We also provide the function plotSpectogramF0Segments() to plot the f0 contour and the detected
segments on the top of the spectrogram of the audio signal in order to visually analyse the outcome
of your function. Depending on the analysis parameters and the capabilities of the hardware you
use, the function might take a while to run (even half a minute in some cases).
"""
def segmentStableNotesRegions(inputFile = '../../sounds/sax-phrase-short.wav', stdThsld=10, minNoteDur=0.1,
winStable = 3, window='hamming', M=1024, N=2048, H=256, f0et=5.0, t=-100,
minf0=310, maxf0=650):
"""
Function to segment the stable note regions in an audio signal
Input:
inputFile (string): wav file including the path
stdThsld (float): threshold for detecting stable regions in the f0 contour (in cents)
minNoteDur (float): minimum allowed segment length (note duration)
winStable (integer): number of samples used for computing standard deviation
window (string): analysis window
M (integer): window size used for computing f0 contour
N (integer): FFT size used for computing f0 contour
H (integer): Hop size used for computing f0 contour
f0et (float): error threshold used for the f0 computation
t (float): magnitude threshold in dB used in spectral peak picking
minf0 (float): minimum fundamental frequency in Hz
maxf0 (float): maximum fundamental frequency in Hz
Output:
segments (np.ndarray): Numpy array containing starting and ending frame indexes of every segment.
"""
fs, x = UF.wavread(inputFile) # reading inputFile
w = get_window(window, M) # obtaining analysis window
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) # estimating F0
# 1. convert f0 values from Hz to Cents (as described in pdf document)
f0_in_cents = 1200.0*np.log2(f0/55.0 + eps)
#2. create an array containing standard deviation of last winStable samples
std_F0 = (stdThsld + eps)*np.ones(winStable - 1,dtype = float)
for i in range(winStable - 1, len(f0_in_cents)):
std_F0 = np.append(std_F0,np.std(f0_in_cents[(i - winStable + 1):(i + 1)]))
# print 'step = ' + str(i)
# print 'nBinLow = ' + str(i - winStable + 1)
# print 'nBinHigh = ' + str(i + 1)
# print 'Values = ' + str(f0_in_cents[(i - winStable + 1):(i + 1)])
# print '****'
#3. apply threshold on standard deviation values to find indexes of the stable points in melody
idx = np.where(std_F0 < stdThsld)[0]
# idx = np.array([3, 4, 5, 6, 12, 13, 17, 18, 19])
#4. create segments of continuous stable points such that consecutive stable points belong to same segment
idx_Start = np.array([],dtype=np.int64)
idx_End = np.array([],dtype=np.int64)
pointer_Start = 0
pointer_End = 0
for i in range(0, len(idx)-1):
# print 'pointer_Start = ' + str(pointer_Start)
# print 'pointer_End = ' + str(pointer_End)
# print '****'
if((idx[i+1] - idx[i]) != 1):
idx_Start = np.append(idx_Start,pointer_Start)
idx_End = np.append(idx_End,pointer_End)
pointer_End += 1
pointer_Start = (i+1)
else:
pointer_End += 1
idx_Start = np.append(idx_Start,pointer_Start)
idx_End = np.append(idx_End,pointer_End)
#5. apply segment filtering, i.e. remove segments with are < minNoteDur in length
idx_segments = np.where((idx_End - idx_Start + 1)*H/float(fs) >= minNoteDur)[0]
segments_Start = idx[idx_Start[idx_segments]]
segments_End = idx[idx_End[idx_segments]]
segments = np.array([segments_Start,segments_End])
segments = np.transpose(segments)
#plotSpectogramF0Segments(x, fs, w, N, H, f0, segments)
return segments
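# Illustrative sketch of step 4 of the procedure implemented above: grouping
# consecutive stable-frame indexes into (start, end) segments. The helper name
# _group_consecutive is hypothetical and is not used by the functions here.
def _group_consecutive(idx):
    if len(idx) == 0:
        return np.empty((0, 2), dtype=int)
    breaks = np.where(np.diff(idx) > 1)[0] + 1   # positions where a new segment starts
    groups = np.split(idx, breaks)
    return np.array([[g[0], g[-1]] for g in groups])
# e.g. _group_consecutive(np.array([3, 4, 5, 6, 12, 13, 14])) gives [[3, 6], [12, 14]]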
def plotSpectogramF0Segments(x, fs, w, N, H, f0, segments):
"""
Code for plotting the f0 contour on top of the spectrogram
"""
# frequency range to plot
maxplotfreq = 1000.0
fontSize = 16
fig = plt.figure()
ax = fig.add_subplot(111)
mX, pX = stft.stftAnal(x, fs, w, N, H) #using same params as used for analysis
mX = np.transpose(mX[:,:int(N*(maxplotfreq/fs))+1])
timeStamps = np.arange(mX.shape[1])*H/float(fs)
binFreqs = np.arange(mX.shape[0])*fs/float(N)
plt.pcolormesh(timeStamps, binFreqs, mX)
plt.plot(timeStamps, f0, color = 'k', linewidth=5)
for ii in range(segments.shape[0]):
plt.plot(timeStamps[segments[ii,0]:segments[ii,1]], f0[segments[ii,0]:segments[ii,1]], color = '#A9E2F3', linewidth=1.5)
plt.autoscale(tight=True)
plt.ylabel('Frequency (Hz)', fontsize = fontSize)
plt.xlabel('Time (s)', fontsize = fontSize)
plt.legend(('f0','segments'))
xLim = ax.get_xlim()
yLim = ax.get_ylim()
ax.set_aspect((xLim[1]-xLim[0])/(2.0*(yLim[1]-yLim[0])))
plt.autoscale(tight=True)
plt.show()
#inputFile = '../../sounds/sax-phrase-short.wav'
#stdThsld=5
#minNoteDur=0.6
#winStable = 3
#window='hamming'
#M=1025
#N=2048
#H=256
#f0et=5.0
#t=-100
#minf0=310
#maxf0=650
#inputFile = '../../sounds/cello-phrase.wav'
#stdThsld=20
#minNoteDur=0.5
#winStable = 3
#window='hamming'
#M=1025
#N=2048
#H=256
#f0et=5.0
#t=-100
#minf0=310
#maxf0=650
##z = segmentStableNotesRegions(inputFile, stdThsld, minNoteDur, winStable, window, M, N, H, f0et, t, minf0 , maxf0)
#fs, x = UF.wavread(inputFile) # reading inputFile
#w = get_window(window, M) # obtaining analysis window
#f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) # estimating F0
## 1. convert f0 values from Hz to Cents (as described in pdf document)
#f0_in_cents = 1200.0*np.log2(f0/55.0 + eps)
##2. create an array containing standard deviation of last winStable samples
#std_F0 = (stdThsld + eps)*np.ones(winStable - 1,dtype = float)
#for i in range(winStable - 1, len(f0_in_cents)):
# std_F0 = np.append(std_F0,np.std(f0_in_cents[(i - winStable + 1):(i + 1)]))
## print 'step = ' + str(i)
## print 'nBinLow = ' + str(i - winStable + 1)
## print 'nBinHigh = ' + str(i + 1)
## print 'Values = ' + str(f0_in_cents[(i - winStable + 1):(i + 1)])
## print '****'
##3. apply threshold on standard deviation values to find indexes of the stable points in melody
#idx = np.where(std_F0 < stdThsld)[0]
## idx = np.array([3, 4, 5, 6, 12, 13, 17, 18, 19])
##4. create segments of continuous stable points such that consecutive stable points belong to same segment
#idx_Start = np.array([],dtype=np.int64)
#idx_End = np.array([],dtype=np.int64)
#pointer_Start = 0
#pointer_End = 0
#for i in range(0, len(idx)-1):
## print 'pointer_Start = ' + str(pointer_Start)
## print 'pointer_End = ' + str(pointer_End)
## print '****'
#
# if((idx[i+1] - idx[i]) != 1):
# idx_Start = np.append(idx_Start,pointer_Start)
# idx_End = np.append(idx_End,pointer_End)
# pointer_End += 1
# pointer_Start = (i+1)
# else:
# pointer_End += 1
#
#idx_Start = np.append(idx_Start,pointer_Start)
#idx_End = np.append(idx_End,pointer_End)
##5. apply segment filtering, i.e. remove segments with are < minNoteDur in length
#idx_segments = np.where((idx_End - idx_Start + 1)*H/float(fs) >= minNoteDur)[0]
#segments_Start = idx[idx_Start[idx_segments]]
#segments_End = idx[idx_End[idx_segments]]
#segments = np.array([segments_Start,segments_End])
#segments = np.transpose(segments)
#plotSpectogramF0Segments(x, fs, w, N, H, f0, segments)
| agpl-3.0 |
cl4rke/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
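# Illustrative sketch (a hypothetical helper, not used by the estimators
# below): the one-versus-all reduction encodes a multiclass target vector into
# one +/-1 label vector per class, which is the form consumed by fit_binary.
def _ova_targets(y, classes):
    y = np.asarray(y)
    # row i holds the binary problem "classes[i] versus the rest"
    return np.vstack([np.where(y == c, 1.0, -1.0) for c in classes])
# e.g. _ova_targets([0, 2, 1, 2], classes=[0, 1, 2]) has shape (3, 4)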
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
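# --- Illustrative sketch (not part of the original module) -----------------
# The predict_proba docstring above states that binary probability estimates
# for loss="modified_huber" are (clip(decision_function(X), -1, 1) + 1) / 2.
# The helper below spells that formula out on a tiny made-up dataset and
# checks it against predict_proba; treat it as a hedged example, not as part
# of the scikit-learn API.
def _example_modified_huber_probabilities():
    import numpy as np
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([1, 1, 2, 2])
    clf = SGDClassifier(loss="modified_huber", random_state=0).fit(X, y)
    scores = clf.decision_function(X)
    prob_pos = (np.clip(scores, -1, 1) + 1) / 2.   # P(classes_[1])
    manual = np.column_stack([1 - prob_pos, prob_pos])
    assert np.allclose(manual, clf.predict_proba(X))
    return manual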
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
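# --- Illustrative sketch (not part of the original module) -----------------
# The docstrings above describe minibatch (online/out-of-core) learning via
# partial_fit.  The helper below streams made-up regression chunks through
# SGDRegressor one at a time; the chunk sizes, coefficients and noise level
# are invented purely for illustration.
def _example_out_of_core_regression(n_chunks=5, chunk_size=200):
    import numpy as np
    rng = np.random.RandomState(0)
    reg = SGDRegressor(random_state=0)
    for _ in range(n_chunks):
        X = rng.randn(chunk_size, 3)
        y = X.dot([1.0, -2.0, 0.5]) + 0.1 * rng.randn(chunk_size)
        reg.partial_fit(X, y)   # a single pass over this chunk only
    return reg.coef_, reg.intercept_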
| bsd-3-clause |
xwolf12/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
    include_self : bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
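# --- Illustrative sketch (not part of this module's public API) ------------
# include_self, documented above, controls whether each sample counts as its
# own first neighbor.  The helper below builds the same connectivity graph
# both ways on a tiny made-up dataset so the difference on the diagonal is
# visible; it is a hedged example only.
def _example_include_self():
    X = [[0], [3], [1]]
    with_self = kneighbors_graph(X, 2, mode='connectivity',
                                 include_self=True).toarray()
    without_self = kneighbors_graph(X, 2, mode='connectivity',
                                    include_self=False).toarray()
    return with_self, without_self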
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
    include_self : bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
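# --- Illustrative sketch (not part of this module's public API) ------------
# Both graph functions accept an already-fitted estimator instead of raw
# data, in which case _check_params verifies that metric, p and metric_params
# match the estimator.  A minimal sketch of that path, with made-up points:
def _example_prefitted_estimator():
    X = [[0.0], [3.0], [1.0]]
    nn = NearestNeighbors(radius=1.5).fit(X)
    # the fitted estimator is reused; metric/p must agree with its parameters
    return radius_neighbors_graph(nn, 1.5, mode='connectivity',
                                  include_self=True).toarray()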
| bsd-3-clause |
marcinsiwicki/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
    trim = max(1, n // 10)  # integer trim size; true division would give a float index
return np.mean(sorted(t)[trim:n-trim])
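# A hedged sketch of tmean on made-up prices: with 20 values, the top and
# bottom 10% (two values each) are dropped before averaging, so the two
# outliers below do not affect the result.
def _example_tmean():
    prices = pandas.Series([1.0] * 18 + [100.0, -50.0])
    return tmean(prices)   # == 1.0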
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
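# A hedged sketch of RunLinearModel on synthetic data (the real script uses
# the daily price DataFrames built above); the intercept, slope and noise
# level below are invented for illustration.
def _example_run_linear_model():
    years = np.linspace(0, 3, 60)
    fake = pandas.DataFrame(dict(years=years,
                                 ppg=12 - 2 * years +
                                     np.random.normal(0, 0.3, 60)))
    _, results = RunLinearModel(fake)
    return results.params   # Intercept and years coefficients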
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
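# A hedged sketch of GeneratePredictions: fit a single model on synthetic
# data and extrapolate past the observed range with add_resid=False, so the
# result reflects the fitted model only, not residual error.  All numbers
# below are made up.
def _example_generate_predictions():
    years = np.linspace(0, 3, 60)
    fake = pandas.DataFrame(dict(years=years,
                                 ppg=12 - 2 * years +
                                     np.random.normal(0, 0.3, 60)))
    _, results = RunLinearModel(fake)
    future_years = np.linspace(0, 5, 11)
    return GeneratePredictions([results], future_years, add_resid=False)[0]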
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
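# A hedged sketch of FillMissing on a synthetic series with a gap in its
# DatetimeIndex; it assumes the legacy pandas.ewma API that this module
# already relies on is available.
def _example_fill_missing():
    dates = pandas.date_range('2014-01-01', periods=10).delete([4, 5])
    fake = pandas.DataFrame(dict(ppg=np.random.uniform(8, 12, len(dates))),
                            index=dates)
    filled = FillMissing(fake, span=5)
    return filled[['ppg', 'ewma', 'resid']]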
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
    frisat = (daily.index.dayofweek == 4) | (daily.index.dayofweek == 5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
spacecowboy/article-annriskgroups-source | AnnVariables.py | 1 | 7842 |
# coding: utf-8
# In[1]:
# import stuffs
#get_ipython().magic('matplotlib inline')
import numpy as np
import pandas as pd
from pyplotthemes import get_savefig, classictheme as plt
plt.latex = True
# In[2]:
from datasets import get_mayo, get_veteran, get_lung, get_breasttrn
d = get_mayo(prints=True, norm_in=True, norm_out=False)
durcol = d.columns[0]
eventcol = d.columns[1]
if np.any(d[durcol] < 0):
raise ValueError("Negative times encountered")
print("End time:", d.iloc[:, 0].max())
d
# In[3]:
import ann
from classensemble import ClassEnsemble
def get_net(rows, incols, func=ann.geneticnetwork.FITNESS_SURV_KAPLAN_MIN, mingroup=None,
hidden_count=3, popsize=100, generations=200, mutchance=0.15, conchance=0,
crossover=ann.geneticnetwork.CROSSOVER_UNIFORM,
selection=ann.geneticnetwork.SELECTION_TOURNAMENT):
outcount = 2
l = incols + hidden_count + outcount + 1
net = ann.geneticnetwork(incols, hidden_count, outcount)
net.fitness_function = func
if mingroup is None:
mingroup = int(0.25 * rows)
# Be explicit here even though I changed the defaults
net.connection_mutation_chance = conchance
net.activation_mutation_chance = 0
# Some other values
net.crossover_method = crossover
net.selection_method = selection
net.population_size = popsize
net.generations = generations
net.weight_mutation_chance = mutchance
ann.utils.connect_feedforward(net, hidden_act=net.TANH, out_act=net.SOFTMAX)
c = net.connections.reshape((l, l))
c[-outcount:, :(incols + hidden_count)] = 1
net.connections = c.ravel()
return net
def _netgen(df, netcount, funcs=None, **kwargs):
# Expects (function, mingroup)
if funcs is None:
funcs = [ann.geneticnetwork.FITNESS_SURV_KAPLAN_MIN,
ann.geneticnetwork.FITNESS_SURV_KAPLAN_MAX]
rows = df.shape[0]
incols = df.shape[1] - 2
hnets = []
lnets = []
for i in range(netcount):
if i % 2:
n = get_net(rows, incols, funcs[0], **kwargs)
hnets.append(n)
else:
n = get_net(rows, incols, funcs[1], **kwargs)
lnets.append(n)
return hnets, lnets
def _kanngen(df, netcount, **kwargs):
return _netgen(df, netcount, **kwargs)
def _riskgen(df, netcount, **kwargs):
return _netgen(df, netcount,
[ann.geneticnetwork.FITNESS_SURV_RISKGROUP_HIGH,
ann.geneticnetwork.FITNESS_SURV_RISKGROUP_LOW],
**kwargs)
def get_kanngen(netcount, **kwargs):
return lambda df: _kanngen(df, netcount, **kwargs)
#e = ClassEnsemble(netgen=netgen)
#er = ClassEnsemble(netgen=riskgen)
class NetFitter(object):
def __init__(self, func=ann.geneticnetwork.FITNESS_SURV_KAPLAN_MIN, **kwargs):
self.kwargs = kwargs
self.func = func
def fit(self, df, duration_col, event_col):
'''
Same as learn, but instead conforms to the interface defined by
Lifelines and accepts a data frame as the data. Also generates
new networks using self.netgen is it was defined.
'''
rows = df.shape[0]
incols = df.shape[1] - 2
self.net = get_net(rows, incols, self.func, **self.kwargs)
# Save columns for prediction later
        self.x_cols = df.columns.difference([duration_col, event_col])
self.net.learn(df[self.x_cols].values,
df[[duration_col, event_col]].values)
def get_log(self, df):
'''
Returns a truncated training log
'''
return pd.Series(self.net.log.ravel()[:self.net.generations])
# In[4]:
from stats import k_fold_cross_validation
from lifelines.estimation import KaplanMeierFitter, median_survival_times
def score(T_actual, labels, E_actual):
'''
Return a score based on grouping
'''
scores = []
labels = labels.ravel()
for g in ['high', 'mid', 'low']:
members = labels == g
if np.sum(members) > 0:
kmf = KaplanMeierFitter()
kmf.fit(T_actual[members],
E_actual[members],
label='{}'.format(g))
# Last survival time
if np.sum(E_actual[members]) > 0:
lasttime = np.max(T_actual[members][E_actual[members] == 1])
else:
lasttime = np.nan
# End survival rate, median survival time, member count, last event
subscore = (kmf.survival_function_.iloc[-1, 0],
median_survival_times(kmf.survival_function_),
np.sum(members),
lasttime)
else:
# Rpart might fail in this respect
subscore = (np.nan, np.nan, np.sum(members), np.nan)
scores.append(subscore)
return scores
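# A hedged sketch of score() on synthetic survival data; the durations,
# censoring indicators and group labels below are made up, and the call
# relies on the same lifelines version this script already imports.
def _example_score():
    rng = np.random.RandomState(1)
    T = rng.exponential(scale=10.0, size=60)
    E = rng.binomial(1, 0.7, size=60)
    labels = np.array(['high', 'mid', 'low'] * 20)
    return score(T, labels, E)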
def logscore(T_actual, log, E_actual):
# Return last value in the log
return log[-1]
# # Compare stuff
# In[ ]:
#netcount = 6
models = []
# Try different epoch counts
for x in [0.0, 0.01, 0.05, 0.1, 0.15, 0.25, 0.5]:
#e = ClassEnsemble(netgen=get_kanngen(netcount, generations=x))
e = NetFitter(func=ann.geneticnetwork.FITNESS_SURV_KAPLAN_MIN,
popsize=50, generations=100, conchance=x)
#, mingroup=int(0.25*d.shape[0]))
e.var_label = 'Connection chance'
e.var_value = x
models.append(e)
# In[ ]:
n = 10
k = 4
# Repeated cross-validation
repeat_results = []
for rep in range(n):
result = k_fold_cross_validation(models, d, durcol, eventcol, k=k, evaluation_measure=logscore, predictor='get_log')
repeat_results.append(result)
#repeat_results
# # Plot results
# In[ ]:
def plot_logscore(repeat_results, models):
boxes = []
labels = []
var_label = None
# Makes no sense for low here for many datasets...
for i, m in enumerate(models):
labels.append(str(m.var_value))
var_label = m.var_label
vals = []
for result in repeat_results:
vals.extend(result[i])
boxes.append(vals)
plt.figure()
plt.boxplot(boxes, labels=labels, vert=False, colors=plt.colors[:len(models)])
plt.ylabel(var_label)
plt.title("Cross-validation: n={} k={}".format(n, k))
plt.xlabel("Something..")
#plt.gca().set_xscale('log')
plot_logscore(repeat_results, models)
# In[ ]:
def plot_score(repeat_results, models, scoreindex=0):
boxes = []
labels = []
var_label = []
# Makes no sense for low here for many datasets...
for i, g in enumerate(['high', 'mid', 'low']):
for j, m in enumerate(models):
if g == 'high':
labels.append('H ' + str(m.var_value))
elif g == 'low':
labels.append('L ' + str(m.var_value))
else:
labels.append('M ' + str(m.var_value))
var_label = m.var_label
vals = []
for result in repeat_results:
for subscore in result[j]:
vals.append(subscore[i][scoreindex])
boxes.append(vals)
plt.figure()
plt.boxplot(boxes, labels=labels, vert=False, colors=plt.colors[:len(models)])
plt.ylabel(var_label)
plt.title("Cross-validation: n={} k={}".format(n, k))
if scoreindex == 0:
plt.xlabel("End Survival Rate")
elif scoreindex == 1:
plt.xlabel("Median Survival Time")
elif scoreindex == 2:
plt.xlabel("Group size")
elif scoreindex == 3:
plt.xlabel("Last event time")
# In[ ]:
plot_score(repeat_results, models, 0)
# In[ ]:
plot_score(repeat_results, models, 1)
# In[ ]:
plot_score(repeat_results, models, 2)
# In[ ]:
plot_score(repeat_results, models, 3)
# In[ ]:
# scratch cell: build one batch of ensemble networks directly
hnets, lnets = _netgen(d, 6)
| gpl-3.0 |
ndingwall/scikit-learn | examples/manifold/plot_mds.py | 17 | 2766 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
EPSILON = np.finfo(np.float32).eps
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / (similarities + EPSILON) * 100
np.fill_diagonal(similarities, 0)
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(np.full(len(segments), 0.5))
ax.add_collection(lc)
plt.show()
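# A small optional helper (not part of the original example): after fitting,
# both estimators expose the final value of the stress objective in
# ``stress_``, which gives a rough way to compare the metric and non-metric
# embeddings plotted above.  This is a hedged addition that simply reuses the
# estimators fitted earlier in the script.
def report_stress(metric_mds=mds, nonmetric_mds=nmds):
    print("metric MDS stress: %.3f" % metric_mds.stress_)
    print("non-metric MDS stress: %.3f" % nonmetric_mds.stress_)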
| bsd-3-clause |
Eric89GXL/scipy | scipy/stats/_binned_statistic.py | 5 | 25939 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import callable, xrange
from scipy._lib._numpy_compat import suppress_warnings
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
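# Illustrative sketch (added for clarity; not part of the original module):
# `statistic` also accepts a callable, as documented above. Wrapped in a
# helper so importing this module stays side-effect free; the sample data
# below is invented for the example.
def _example_binned_statistic_callable():
    x = [1, 1, 2, 5, 7]
    values = [1.0, 1.0, 2.0, 1.5, 3.0]
    # peak-to-peak spread of the values that fall in each of two bins
    return binned_statistic(x, values,
                            statistic=lambda v: v.max() - v.min(), bins=2)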
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
        If the bin edges are specified, the number of bins will be
        ``nx = len(x_edge) - 1`` and ``ny = len(y_edge) - 1``.
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
    This shows that the first three elements fall in x-bin 1 and the fourth
    in x-bin 2, and likewise for the y bins.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
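# Illustrative sketch (added for clarity; not part of the original module):
# a 'mean' statistic over 2-D bins, complementing the 'count' example in the
# docstring above. Wrapped in a helper so nothing runs at import time; the
# data is invented for the example.
def _example_binned_statistic_2d_mean():
    x = [0.1, 0.1, 0.1, 0.6]
    y = [2.1, 2.6, 2.1, 2.1]
    values = [1.0, 2.0, 3.0, 4.0]
    binx = [0.0, 0.5, 1.0]
    biny = [2.0, 2.5, 3.0]
    return binned_statistic_2d(x, y, values, statistic='mean',
                               bins=[binx, biny])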
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except Exception:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
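# Illustrative usage sketch (added for clarity; not part of the original
# module): the docstring above has no Examples section, so this shows the
# basic call shape. Wrapped in a helper so nothing runs at import time; the
# random sample is invented for the example.
def _example_binned_statistic_dd():
    rng = np.random.RandomState(0)
    sample = rng.rand(100, 2)              # (N, D) points in the unit square
    values = sample[:, 0] + sample[:, 1]   # one value per point
    stat, edges, binnumbers = binned_statistic_dd(sample, values,
                                                  statistic='mean',
                                                  bins=[4, 4])
    # `stat` is a 4x4 array of per-bin means; `edges` holds both edge arrays
    return stat, edges, binnumbers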
| bsd-3-clause |
flightgong/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/plotting/test_deprecated.py | 2 | 1528 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
@pytest.mark.slow
@td.skip_if_no_scipy
def test_scatter_plot_legacy(self):
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@pytest.mark.slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@pytest.mark.slow
def test_radviz_deprecated(self):
df = self.iris
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=df, class_column='Name')
@pytest.mark.slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
pd.plot_params['xaxis.compat'] = True
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont_fullelines/MoreLines.py | 3 | 7227 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("More Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
	add_sub_plot(i + 1)  # subplot indices are 1-based (see the sub_num checks above)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('MoreLines1.pdf')
plt.clf()
| gpl-2.0 |
gaocegege/treadmill | treadmill/reports.py | 3 | 5262 | """Handles reports over scheduler data."""
import time
import datetime
import itertools
import logging
import numpy as np
import pandas as pd
from functools import reduce
_LOGGER = logging.getLogger(__name__)
def servers(cell):
"""Returns dataframe for servers hierarchy."""
def _server_row(server):
"""Converts server object to dict used to construct dataframe row.
"""
row = {
'name': server.name,
'memory': server.init_capacity[0],
'cpu': server.init_capacity[1],
'disk': server.init_capacity[2],
'traits': server.traits.traits,
'free.memory': server.free_capacity[0],
'free.cpu': server.free_capacity[1],
'free.disk': server.free_capacity[2],
'state': server.state.value,
'valid_until': server.valid_until
}
node = server.parent
while node:
row[node.level] = node.name
node = node.parent
return row
frame = pd.DataFrame.from_dict([
_server_row(server) for server in cell.members().values()
])
if frame.empty:
frame = pd.DataFrame(columns=['name', 'memory', 'cpu', 'disk',
'free.memory', 'free.cpu', 'free.disk',
'state'])
for col in ['valid_until']:
frame[col] = pd.to_datetime(frame[col], unit='s')
return frame.set_index('name')
def allocations(cell):
"""Converts cell allocations into dataframe row."""
def _leafs(path, alloc):
"""Generate leaf allocations - (path, alloc) tuples."""
if not alloc.sub_allocations:
return iter([('/'.join(path), alloc)])
else:
def _chain(acc, item):
"""Chains allocation iterators."""
name, suballoc = item
return itertools.chain(acc, _leafs(path + [name], suballoc))
return reduce(_chain, iter(alloc.sub_allocations.items()), [])
def _alloc_row(label, name, alloc):
"""Converts allocation to dict/dataframe row."""
if not name:
name = 'root'
if not label:
label = '-'
return {
'label': label,
'name': name,
'memory': alloc.reserved[0],
'cpu': alloc.reserved[1],
'disk': alloc.reserved[2],
'rank': alloc.rank,
'traits': alloc.traits,
'max_utilization': alloc.max_utilization,
}
all_allocs = []
for label, partition in cell.partitions.items():
allocation = partition.allocation
leaf_allocs = _leafs([], allocation)
alloc_df = pd.DataFrame.from_dict(
[_alloc_row(label, name, alloc) for name, alloc in leaf_allocs]
).set_index(['label', 'name'])
all_allocs.append(alloc_df)
return pd.concat(all_allocs)
def apps(cell):
"""Return application queue and app details as dataframe."""
def _app_row(item):
"""Converts app queue item into dict for dataframe row."""
rank, util, pending, order, app = item
return {
'instance': app.name,
'affinity': app.affinity.name,
'allocation': app.allocation.name,
'rank': rank,
'label': app.allocation.label,
'util': util,
'pending': pending,
'order': order,
'identity_group': app.identity_group,
'identity': app.identity,
'memory': app.demand[0],
'cpu': app.demand[1],
'disk': app.demand[2],
'lease': app.lease,
'expires': app.placement_expiry,
'data_retention_timeout': app.data_retention_timeout,
'server': app.server
}
queue = []
for partition in cell.partitions.values():
allocation = partition.allocation
queue += allocation.utilization_queue(cell.size(allocation.label))
frame = pd.DataFrame.from_dict([_app_row(item) for item in queue])
if frame.empty:
return frame
for col in ['expires']:
frame[col] = pd.to_datetime(frame[col], unit='s')
for col in ['lease', 'data_retention_timeout']:
frame[col] = pd.to_timedelta(frame[col], unit='s')
return frame.set_index('instance')
def utilization(prev_utilization, apps_df):
"""Returns dataseries describing cell utilization.
prev_utilization - utilization dataframe before current.
    apps_df - app queue dataframe.
"""
# Passed by ref.
row = apps_df.reset_index()
if row.empty:
return row
row['count'] = 1
row['name'] = row['instance'].apply(lambda x: x.split('#')[0])
row = row.groupby('name').agg({'cpu': np.sum,
'memory': np.sum,
'disk': np.sum,
'count': np.sum,
'util': np.max})
row = row.stack()
dt_now = datetime.datetime.fromtimestamp(time.time())
current = pd.DataFrame([row], index=pd.DatetimeIndex([dt_now]))
if prev_utilization is None:
return current
else:
return prev_utilization.append(current)
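# Illustrative sketch (added for clarity; not part of the original module):
# `utilization` expects an apps frame indexed by 'instance' with the columns
# aggregated above. The instances and numbers below are fabricated purely to
# show the expected shape; nothing here runs at import time.
def _example_utilization():
    apps_df = pd.DataFrame(
        {'cpu': [100, 200], 'memory': [512, 1024], 'disk': [10, 20],
         'util': [0.5, 0.7]},
        index=pd.Index(['proid.app#0000000001', 'proid.app#0000000002'],
                       name='instance'))
    first = utilization(None, apps_df)    # seed the time series
    return utilization(first, apps_df)    # append a second snapshot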
| apache-2.0 |
matthew-tucker/mne-python | mne/tests/test_evoked.py | 5 | 16593 | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Andrew Dykstra <[email protected]>
# Mads Jensen <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
import warnings
import numpy as np
from scipy import fftpack
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne import (equalize_channels, pick_types, read_evokeds, write_evokeds,
grand_average, combine_evoked)
from mne.evoked import _get_peak, EvokedArray
from mne.epochs import EpochsArray
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.externals.six.moves import cPickle as pickle
warnings.simplefilter('always')
fname = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test-ave.fif')
fname_gz = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data',
'test-ave.fif.gz')
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
evoked = read_evokeds(fname, 0)
freqs = fftpack.fftfreq(len(evoked.times), 1. / evoked.info['sfreq'])
data = np.abs(fftpack.fft(evoked.data))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
assert_raises(ValueError, evoked.savgol_filter, evoked.info['sfreq'])
evoked.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(evoked.data))
# decent in pass-band
assert_allclose(np.mean(data[:, match_mask], 0),
np.mean(data_filt[:, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, mismatch_mask]) >
np.mean(data_filt[:, mismatch_mask]) * 5)
def test_hash_evoked():
"""Test evoked hashing
"""
ave = read_evokeds(fname, 0)
ave_2 = read_evokeds(fname, 0)
assert_equal(hash(ave), hash(ave_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(ave) == pickle.dumps(ave_2))
ave_2.data[0, 0] -= 1
assert_not_equal(hash(ave), hash(ave_2))
@slow_test
def test_io_evoked():
"""Test IO for evoked data (fif + gz) with integer and str args
"""
tempdir = _TempDir()
ave = read_evokeds(fname, 0)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
# This not being assert_array_equal due to windows rounding
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-3))
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
assert_true(repr(ave))
# test compressed i/o
ave2 = read_evokeds(fname_gz, 0)
assert_true(np.allclose(ave.data, ave2.data, atol=1e-16, rtol=1e-8))
# test str access
condition = 'Left Auditory'
assert_raises(ValueError, read_evokeds, fname, condition, kind='stderr')
assert_raises(ValueError, read_evokeds, fname, condition,
kind='standard_error')
ave3 = read_evokeds(fname, condition)
assert_array_almost_equal(ave.data, ave3.data, 19)
# test read_evokeds and write_evokeds
types = ['Left Auditory', 'Right Auditory', 'Left visual', 'Right visual']
aves1 = read_evokeds(fname)
aves2 = read_evokeds(fname, [0, 1, 2, 3])
aves3 = read_evokeds(fname, types)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), aves1)
aves4 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))
for aves in [aves2, aves3, aves4]:
for [av1, av2] in zip(aves1, aves):
assert_array_almost_equal(av1.data, av2.data)
assert_array_almost_equal(av1.times, av2.times)
assert_equal(av1.nave, av2.nave)
assert_equal(av1.kind, av2.kind)
assert_equal(av1._aspect_kind, av2._aspect_kind)
assert_equal(av1.last, av2.last)
assert_equal(av1.first, av2.first)
assert_equal(av1.comment, av2.comment)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fname2 = op.join(tempdir, 'test-bad-name.fif')
write_evokeds(fname2, ave)
read_evokeds(fname2)
assert_true(len(w) == 2)
def test_shift_time_evoked():
""" Test for shifting of time scale
"""
tempdir = _TempDir()
# Shift backward
ave = read_evokeds(fname, 0)
ave.shift_time(-0.1, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
# Shift forward twice the amount
ave_bshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_bshift.shift_time(0.2, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_bshift)
# Shift backward again
ave_fshift = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_fshift.shift_time(-0.1, relative=True)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave_fshift)
ave_normal = read_evokeds(fname, 0)
ave_relative = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_relative.data,
atol=1e-16, rtol=1e-3))
assert_array_almost_equal(ave_normal.times, ave_relative.times, 10)
assert_equal(ave_normal.last, ave_relative.last)
assert_equal(ave_normal.first, ave_relative.first)
# Absolute time shift
ave = read_evokeds(fname, 0)
ave.shift_time(-0.3, relative=False)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_absolute = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
assert_true(np.allclose(ave_normal.data, ave_absolute.data,
atol=1e-16, rtol=1e-3))
assert_equal(ave_absolute.first, int(-0.3 * ave.info['sfreq']))
def test_evoked_resample():
"""Test for resampling of evoked data
"""
tempdir = _TempDir()
# upsample, write it out, read it in
ave = read_evokeds(fname, 0)
sfreq_normal = ave.info['sfreq']
ave.resample(2 * sfreq_normal)
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), ave)
ave_up = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
# compare it to the original
ave_normal = read_evokeds(fname, 0)
# and compare the original to the downsampled upsampled version
ave_new = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 0)
ave_new.resample(sfreq_normal)
assert_array_almost_equal(ave_normal.data, ave_new.data, 2)
assert_array_almost_equal(ave_normal.times, ave_new.times)
assert_equal(ave_normal.nave, ave_new.nave)
assert_equal(ave_normal._aspect_kind, ave_new._aspect_kind)
assert_equal(ave_normal.kind, ave_new.kind)
assert_equal(ave_normal.last, ave_new.last)
assert_equal(ave_normal.first, ave_new.first)
# for the above to work, the upsampling just about had to, but
# we'll add a couple extra checks anyway
assert_true(len(ave_up.times) == 2 * len(ave_normal.times))
assert_true(ave_up.data.shape[1] == 2 * ave_normal.data.shape[1])
def test_evoked_detrend():
"""Test for detrending evoked data
"""
ave = read_evokeds(fname, 0)
ave_normal = read_evokeds(fname, 0)
ave.detrend(0)
ave_normal.data -= np.mean(ave_normal.data, axis=1)[:, np.newaxis]
picks = pick_types(ave.info, meg=True, eeg=True, exclude='bads')
assert_true(np.allclose(ave.data[picks], ave_normal.data[picks],
rtol=1e-8, atol=1e-16))
@requires_pandas
def test_to_data_frame():
"""Test evoked Pandas exporter"""
ave = read_evokeds(fname, 0)
assert_raises(ValueError, ave.to_data_frame, picks=np.arange(400))
df = ave.to_data_frame()
assert_true((df.columns == ave.ch_names).all())
df = ave.to_data_frame(index=None).reset_index('time')
assert_true('time' in df.columns)
assert_array_equal(df.values[:, 1], ave.data[0] * 1e13)
assert_array_equal(df.values[:, 3], ave.data[2] * 1e15)
def test_evoked_proj():
"""Test SSP proj operations
"""
for proj in [True, False]:
ave = read_evokeds(fname, condition=0, proj=proj)
assert_true(all(p['active'] == proj for p in ave.info['projs']))
# test adding / deleting proj
if proj:
assert_raises(ValueError, ave.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, ave.del_proj, 0)
else:
projs = deepcopy(ave.info['projs'])
n_proj = len(ave.info['projs'])
ave.del_proj(0)
assert_true(len(ave.info['projs']) == n_proj - 1)
ave.add_proj(projs, remove_existing=False)
assert_true(len(ave.info['projs']) == 2 * n_proj - 1)
ave.add_proj(projs, remove_existing=True)
assert_true(len(ave.info['projs']) == n_proj)
ave = read_evokeds(fname, condition=0, proj=False)
data = ave.data.copy()
ave.apply_proj()
assert_allclose(np.dot(ave._projector, data), ave.data)
def test_get_peak():
"""Test peak getter
"""
evoked = read_evokeds(fname, condition=0, proj=True)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=1)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmax=0.9)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', tmin=0.02,
tmax=0.01)
assert_raises(ValueError, evoked.get_peak, ch_type='mag', mode='foo')
assert_raises(RuntimeError, evoked.get_peak, ch_type=None, mode='foo')
assert_raises(ValueError, evoked.get_peak, ch_type='misc', mode='foo')
ch_idx, time_idx = evoked.get_peak(ch_type='mag')
assert_true(ch_idx in evoked.ch_names)
assert_true(time_idx in evoked.times)
ch_idx, time_idx = evoked.get_peak(ch_type='mag',
time_as_index=True)
assert_true(time_idx < len(evoked.times))
data = np.array([[0., 1., 2.],
[0., -3., 0]])
times = np.array([.1, .2, .3])
ch_idx, time_idx = _get_peak(data, times, mode='abs')
assert_equal(ch_idx, 1)
assert_equal(time_idx, 1)
ch_idx, time_idx = _get_peak(data * -1, times, mode='neg')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
ch_idx, time_idx = _get_peak(data, times, mode='pos')
assert_equal(ch_idx, 0)
assert_equal(time_idx, 2)
assert_raises(ValueError, _get_peak, data + 1e3, times, mode='neg')
assert_raises(ValueError, _get_peak, data - 1e3, times, mode='pos')
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
evoked = read_evokeds(fname, condition=0, proj=True)
drop_ch = evoked.ch_names[:3]
ch_names = evoked.ch_names[3:]
ch_names_orig = evoked.ch_names
dummy = evoked.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.drop_channels(drop_ch)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
evoked = read_evokeds(fname, condition=0, proj=True)
ch_names = evoked.ch_names[:3]
ch_names_orig = evoked.ch_names
dummy = evoked.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, evoked.ch_names)
assert_equal(len(ch_names_orig), len(evoked.data))
evoked.pick_channels(ch_names)
assert_equal(ch_names, evoked.ch_names)
assert_equal(len(ch_names), len(evoked.data))
evoked = read_evokeds(fname, condition=0, proj=True)
assert_true('meg' in evoked)
assert_true('eeg' in evoked)
evoked.pick_types(meg=False, eeg=True)
assert_true('meg' not in evoked)
assert_true('eeg' in evoked)
assert_true(len(evoked.ch_names) == 60)
def test_equalize_channels():
"""Test equalization of channels
"""
evoked1 = read_evokeds(fname, condition=0, proj=True)
evoked2 = evoked1.copy()
ch_names = evoked1.ch_names[2:]
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
my_comparison = [evoked1, evoked2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_evoked_arithmetic():
"""Test evoked arithmetic
"""
ev = read_evokeds(fname, condition=0)
ev1 = EvokedArray(np.ones_like(ev.data), ev.info, ev.times[0], nave=20)
ev2 = EvokedArray(-np.ones_like(ev.data), ev.info, ev.times[0], nave=10)
# combine_evoked([ev1, ev2]) should be the same as ev1 + ev2:
# data should be added according to their `nave` weights
# nave = ev1.nave + ev2.nave
ev = ev1 + ev2
assert_equal(ev.nave, ev1.nave + ev2.nave)
assert_allclose(ev.data, 1. / 3. * np.ones_like(ev.data))
ev = ev1 - ev2
assert_equal(ev.nave, ev1.nave + ev2.nave)
assert_equal(ev.comment, ev1.comment + ' - ' + ev2.comment)
assert_allclose(ev.data, np.ones_like(ev1.data))
# default comment behavior if evoked.comment is None
old_comment1 = ev1.comment
old_comment2 = ev2.comment
ev1.comment = None
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ev = ev1 - ev2
assert_equal(ev.comment, 'unknown')
ev1.comment = old_comment1
ev2.comment = old_comment2
# equal weighting
ev = combine_evoked([ev1, ev2], weights='equal')
assert_allclose(ev.data, np.zeros_like(ev1.data))
# combine_evoked([ev1, ev2], weights=[1, 0]) should yield the same as ev1
ev = combine_evoked([ev1, ev2], weights=[1, 0])
assert_equal(ev.nave, ev1.nave)
assert_allclose(ev.data, ev1.data)
# simple subtraction (like in oddball)
ev = combine_evoked([ev1, ev2], weights=[1, -1])
assert_allclose(ev.data, 2 * np.ones_like(ev1.data))
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights='foo')
assert_raises(ValueError, combine_evoked, [ev1, ev2], weights=[1])
# grand average
evoked1, evoked2 = read_evokeds(fname, condition=[0, 1], proj=True)
ch_names = evoked1.ch_names[2:]
evoked1.info['bads'] = ['EEG 008'] # test interpolation
evoked1.drop_channels(evoked1.ch_names[:1])
evoked2.drop_channels(evoked2.ch_names[1:2])
gave = grand_average([evoked1, evoked2])
assert_equal(gave.data.shape, [len(ch_names), evoked1.data.shape[1]])
assert_equal(ch_names, gave.ch_names)
assert_equal(gave.nave, 2)
def test_array_epochs():
"""Test creating evoked from array
"""
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data1 = rng.randn(20, 60)
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
evoked1 = EvokedArray(data1, info, tmin=-0.01)
# save, read, and compare evokeds
tmp_fname = op.join(tempdir, 'evkdary-ave.fif')
evoked1.save(tmp_fname)
evoked2 = read_evokeds(tmp_fname)[0]
data2 = evoked2.data
assert_allclose(data1, data2)
assert_allclose(evoked1.times, evoked2.times)
assert_equal(evoked1.first, evoked2.first)
assert_equal(evoked1.last, evoked2.last)
assert_equal(evoked1.kind, evoked2.kind)
assert_equal(evoked1.nave, evoked2.nave)
# now compare with EpochsArray (with single epoch)
data3 = data1[np.newaxis, :, :]
events = np.c_[10, 0, 1]
evoked3 = EpochsArray(data3, info, events=events, tmin=-0.01).average()
assert_allclose(evoked1.data, evoked3.data)
assert_allclose(evoked1.times, evoked3.times)
assert_equal(evoked1.first, evoked3.first)
assert_equal(evoked1.last, evoked3.last)
assert_equal(evoked1.kind, evoked3.kind)
assert_equal(evoked1.nave, evoked3.nave)
# test match between channels info and data
ch_names = ['EEG %03d' % (i + 1) for i in range(19)]
types = ['eeg'] * 19
info = create_info(ch_names, sfreq, types)
assert_raises(ValueError, EvokedArray, data1, info, tmin=-0.01)
| bsd-3-clause |
dssg/wikienergy | disaggregator/build/pandas/pandas/core/groupby.py | 1 | 123222 | import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender, make_signature
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
return x.count() # .size != .count(); count excludes nan
class Grouper(object):
"""
    A Grouper allows the user to specify a groupby instruction for a target
    object.
    This specification will select a column via the key parameter, or, if the
    level and/or axis parameters are given, a level of the index of the
    target object.
    These are local specifications and will override the 'global' settings,
    that is, the `axis` and `level` parameters passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
    freq : string / frequency object, defaults to None
        This will group by the specified frequency if the target selection
        (via key or level) is a datetime-like object
axis : number/name of the axis, defaults to 0
    sort : boolean, defaults to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
    >>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
        self.key = key
        self.level = level
        self.freq = freq
        self.axis = axis
        self.sort = sort
        self.grouper = None
        self.obj = None
        self.indexer = None
        self.binner = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key], axis=self.axis,
level=self.level, sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
        given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax.get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
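# Illustrative sketch (added for clarity; not part of the original module):
# the freq-based form of Grouper described in its docstring, which dispatches
# to TimeGrouper via __new__. Wrapped in a helper so nothing runs at import
# time; the frame below is invented for the example.
def _example_time_grouper():
    from pandas import date_range
    df = DataFrame({'date': date_range('2000-01-01', periods=4, freq='30s'),
                    'value': [1, 2, 3, 4]})
    # group rows into 60-second buckets keyed on the 'date' column
    return df.groupby(Grouper(key='date', freq='60s')).sum()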
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
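    # --- Illustrative sketch (annotation, not part of the original source) ---
    # The usage patterns described in the class docstring above, with hypothetical data:
    #
    #   df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1.0, 2.0, 3.0]})
    #   grouped = df.groupby('A')
    #   for key, group in grouped:        # iteration yields ('x', <2-row frame>), ('y', <1-row frame>)
    #       pass
    #   grouped.std()                     # per-group std (ddof=1)
    #   grouped.aggregate(np.std)         # spelled out; note np.std defaults to ddof=0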
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
            # possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name) == len(sample):
try:
# If the original grouper was a tuple
return self.indices[name]
except KeyError:
# turns out it wasn't a tuple
                    msg = ("must supply a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
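    # --- Illustrative sketch (annotation, not part of the original source) ---
    # _get_index translates a user-supplied key into the representation used in
    # self.indices; for datetime-like group keys a Timestamp (or anything
    # Timestamp() accepts) is converted via convert() above:
    #
    #   df = DataFrame({'date': pd.to_datetime(['2014-01-01', '2014-01-02']),
    #                   'value': [1, 2]})
    #   g = df.groupby('date')
    #   g.get_group(pd.Timestamp('2014-01-01'))   # resolved through _get_index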
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
    def _set_selection_from_grouper(self):
        """ we may need to create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
self._group_selection = ax.difference(Index(groupers)).tolist()
def _set_result_index_ordered(self, result):
# set the result index on the passed values object
# return the new object
# related 8046
# the values/counts are repeated according to the group index
indices = self.indices
        # shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result.index = index
result = result.sort_index()
result.index = self.obj.index
return result
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
            if 'axis' not in kwargs_with_axis or kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise ValueError if
                    # we don't have this method, to indicate to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
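    # --- Illustrative usage sketch (annotation, not part of the original source) ---
    # Hypothetical data for get_group above:
    #
    #   df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
    #   df.groupby('A').get_group('x')               # the two rows where A == 'x'
    #   df.groupby(['A', 'B']).get_group(('x', 1))   # tuple key when grouping by several columns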
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
        apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
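    # --- Illustrative sketch (annotation, not part of the original source) ---
    # The cases from the apply docstring above, with hypothetical data:
    #
    #   df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1.0, 2.0, 3.0]})
    #   g = df.groupby('A')
    #   g.apply(lambda chunk: chunk['B'].sum())               # reduction: one value per group
    #   g.apply(lambda chunk: chunk['B'] - chunk['B'].mean()) # same-indexed result, glued back together
    #   g.apply(lambda chunk: chunk.describe())               # DataFrame per group, concatenated with group keys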
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
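    # --- Illustrative sketch (annotation, not part of the original source) ---
    # The reductions above share the same per-group output shape (hypothetical data):
    #
    #   df = DataFrame({'A': ['x', 'x', 'y'], 'B': [1.0, 3.0, 5.0]})
    #   g = df.groupby('A')['B']
    #   g.mean()   # x -> 2.0, y -> 5.0
    #   g.var()    # x -> 2.0, y -> NaN (single observation with ddof=1)
    #   g.std()    # square root of the variances above
    #   g.sem()    # std / sqrt(group count)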
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
        Compute open, high, low and close values for each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError("dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
m = self.grouper._max_groupsize
# filter out values that are outside [-m, m)
pos_nth_values = [i for i in nth_values if i >= 0 and i < m]
neg_nth_values = [i for i in nth_values if i < 0 and i >= -m]
self._set_selection_from_grouper()
if not dropna: # good choice
if not pos_nth_values and not neg_nth_values:
# no valid nth values
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
for i in pos_nth_values:
rng[i] = True
is_nth = self._cumcount_array(rng)
if neg_nth_values:
rng = np.zeros(m, dtype=bool)
for i in neg_nth_values:
rng[- i - 1] = True
is_nth |= self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
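    # --- Illustrative sketch (annotation, not part of the original source) ---
    # _cumcount_array numbers the rows inside each group; cumcount, head and tail
    # above are thin wrappers over it.  With hypothetical data:
    #
    #   df = DataFrame({'A': ['x', 'x', 'y', 'x']})
    #   g = df.groupby('A')
    #   g.cumcount()                  # 0, 1, 0, 2  (position within each group)
    #   g.cumcount(ascending=False)   # 2, 1, 0, 0
    #   g.head(2)                     # rows whose within-group position is < 2 (rows 0, 1, 2)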
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
        we may have roundtripped through object dtype in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns", excluding exclusions, to populate the output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
            except Exception:
                # fall back to the slow path below
                pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
                f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
            raise NotImplementedError("function is not implemented for this "
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
com._ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
        raise ValueError("Values fall before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
        raise ValueError("Values fall after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
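# --- Illustrative sketch (annotation, not part of the original source) ---
# With both arrays sorted and the values falling inside the binner end-points:
#
#   values = np.array([1, 2, 3, 4, 5, 6])
#   binner = np.array([1, 3, 6])
#   generate_bins_generic(values, binner, 'left')    # -> array([2, 5])
#   generate_bins_generic(values, binner, 'right')   # -> array([3, 6])
#   # for 'left': bins are values[0:2] == [1, 2], values[2:5] == [3, 4, 5],
#   # and the trailing values[5:] == [6]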
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
# for compat
return self.bins, self.binlabels, self.ngroups
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
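# --- Illustrative sketch (annotation, not part of the original source) ---
# A BinGrouper describes groups by cumulative edge offsets into the data plus a
# label per bin (hypothetical values):
#
#   bins = [2, 5, 6]                                  # groups are rows [0:2], [2:5], [5:6]
#   binlabels = pd.date_range('2014-01-01', periods=3, freq='D')
#   bg = BinGrouper(bins, binlabels)
#   bg.ngroups     # 3
#   bg.indices     # {Timestamp('2014-01-01'): [0, 1], ...}
#   bg.groups      # {label -> right bin edge}, mainly for compat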
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
        # dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.categories
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
if getattr(self.grouper,'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
            raise Exception('Should not call this method when grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
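# --- Illustrative sketch (annotation, not part of the original source) ---
# A Grouping factorizes a single key into integer labels plus the unique group
# index (hypothetical values):
#
#   idx = pd.Index(['a', 'b', 'c', 'd'])
#   key = np.array(['x', 'y', 'x', 'y'], dtype=object)
#   ping = Grouping(idx, key)
#   ping.labels         # array([0, 1, 0, 1])
#   ping.group_index    # Index(['x', 'y'], dtype='object')
#   ping.ngroups        # 2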
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
    Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
    This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
    # validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
    # if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
else:
in_axis, name = False, None
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
raise ValueError("Categorical grouper must have len(grouper) == len(data)")
ping = Grouping(group_axis, gpr, obj=obj, name=name,
level=level, sort=sort, in_axis=in_axis)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
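# --- Illustrative sketch (annotation, not part of the original source) ---
# The kinds of keys that end up here from the user's side (hypothetical data):
#
#   df = pd.DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]}, index=['a', 'b', 'c'])
#   df.groupby('A')                        # a column label
#   df.groupby(df['A'])                    # a Series aligned with the axis
#   df.groupby({'a': 0, 'b': 0, 'c': 1})   # a mapping over the index labels
#   df.groupby(pd.Grouper(key='A'))        # an explicit Grouper, handled first above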
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
def _whitelist_method_generator(klass, whitelist) :
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist :
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy,name) :
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass,name)
doc = f.__doc__
doc = doc if type(doc)==str else ''
if type(f) == types.MethodType :
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name':name,
'doc':doc,
'sig':','.join(decl),
'self':args[0],
'args':','.join(args_by_name)}
else :
wrapper_template = property_wrapper_template
params = {'name':name, 'doc':doc}
yield wrapper_template % params
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,_series_apply_whitelist) :
exec(_def_str)
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
return self._set_result_index_ordered(Series(values))
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = _algos.ensure_float64(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
                            # single value
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
                        # GH6124 if the Series in the list have a consistent
                        # name, then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if isinstance(v.index, MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, AttributeError):
                    # GH1738: values is a list of arrays of unequal lengths;
                    # fall through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated)
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
        Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
        # nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
        # a groupby that doesn't preserve the index: remap the index based on
        # the grouper and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
indices = self.indices
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = indices[name]
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
        Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (isinstance(res, (bool, np.bool_)) or
np.isscalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in _whitelist_method_generator(DataFrame,_apply_whitelist) :
exec(_def_str)
_block_agg_axis = 1
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if isinstance(key, (list, tuple, Series, Index, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif not self.as_index:
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
# kind of a kludge
return SeriesGroupBy(self.obj[key], selection=key,
grouper=self.grouper,
exclusions=self.exclusions)
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = DataFrame(result, index=obj.columns,
columns=result_index).T
else:
result = DataFrame(result, index=obj.index,
columns=result_index)
else:
result = DataFrame(result)
return result
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result.consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _reindex_output(self, result):
"""
        If we have categorical groupers, then we want to make sure that we
        have a fully reindexed output to the levels. Some levels may not have
        participated in the groupings (e.g. they may have been all-nan groups).
        This can re-expand the output space.
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([ping._was_factor for ping in groupings]):
return result
levels_list = [ ping._group_index for ping in groupings ]
index = MultiIndex.from_product(levels_list, names=self.grouper.names)
d = { self.obj._get_axis_name(self.axis) : index, 'copy' : False }
return result.reindex(**d).sortlevel(axis=self.axis)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.tools.merge import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If
pass a dict, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise NotImplementedError
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
class NDArrayGroupBy(GroupBy):
pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return com.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
            # Since the group ids are compressed, it is not possible to
            # produce empty slices, because such groups would not be observed
            # in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise NotImplementedError
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
#----------------------------------------------------------------------
# Misc utilities
def get_group_index(label_list, shape):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations.
"""
if len(label_list) == 1:
return label_list[0]
n = len(label_list[0])
group_index = np.zeros(n, dtype=np.int64)
mask = np.zeros(n, dtype=bool)
for i in range(len(shape)):
stride = np.prod([x for x in shape[i + 1:]], dtype=np.int64)
group_index += com._ensure_int64(label_list[i]) * stride
mask |= label_list[i] < 0
np.putmask(group_index, mask, -1)
return group_index
_INT64_MAX = np.iinfo(np.int64).max
def _int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
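    # Illustrative example with assumed toy inputs: this inverts
    # get_group_index, so comp_labels = [2, 3, 5] with shape = (2, 3)
    # is decomposed back into [[0, 1, 1], [2, 0, 2]].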
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def _indexer_from_factorized(labels, shape, compress=True):
if _int64_overflow_possible(shape):
indexer = np.lexsort(np.array(labels[::-1]))
return indexer
group_index = get_group_index(labels, shape)
if compress:
comp_ids, obs_ids = _compress_group_index(group_index)
max_group = len(obs_ids)
else:
comp_ids = group_index
max_group = com._long_prod(shape)
indexer = _get_group_index_sorter(comp_ids.astype(np.int64), max_group)
return indexer
def _lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key,ordered=True)
if na_position not in ['last','first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
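        # Illustrative example with assumed toy inputs: for categories
        # ['a', 'b', 'c'] (n = 3) and codes [0, 2, -1], ascending order with
        # na_position='last' maps the missing code to 3 so it sorts after all
        # real categories; descending order first remaps codes [0, 2] to [2, 0].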
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n-codes-1)
elif na_position == 'first':
codes = np.where(mask, 0, n-codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return _indexer_from_factorized(labels, shape)
def _nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
    This is intended to be a drop-in replacement for np.argsort that handles NaNs.
    It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isnull(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, labels, levels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [_hash.Int64HashTable(ngroups) for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def _get_indices_dict(label_list, keys):
shape = list(map(len, keys))
ngroups = np.prod(shape)
group_index = get_group_index(label_list, shape)
sorter = _get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
#----------------------------------------------------------------------
# sorting levels...cleverly?
def _get_group_index_sorter(group_index, ngroups):
"""
_algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
if alpha + beta * ngroups < count * np.log(count):
sorter, _ = _algos.groupsort_indexer(com._ensure_int64(group_index),
ngroups)
return com._ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def _compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
table = _hash.Int64HashTable(min(1000000, len(group_index)))
group_index = com._ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
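    # Illustrative example with assumed toy inputs: uniques [12, 5, 40] with
    # labels [0, 1, 0, 2] are reordered to uniques [5, 12, 40] and labels
    # [1, 0, 1, 2]; negative labels are kept as -1.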
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = com.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = com.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
_func_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min'
}
def _intercept_function(func):
return _func_table.get(func, func)
def _intercept_cython(func):
return _cython_table.get(func)
def _groupby_indices(values):
return _algos.groupby_indices(_values_from_object(com._ensure_object(values)))
def numpy_groupby(data, labels, axis=0):
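    # Illustrative example with assumed toy inputs: data = [1, 2, 3, 4] with
    # labels = [0, 1, 0, 1] and axis=0 sorts the rows into groups [1, 3] and
    # [2, 4], and np.add.reduceat then returns the group sums [4, 6].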
s = np.argsort(labels)
keys, inv = np.unique(labels, return_inverse=True)
i = inv.take(s)
groups_at = np.where(i != np.concatenate(([-1], i[:-1])))[0]
ordered_data = data.take(s, axis=axis)
group_sums = np.add.reduceat(ordered_data, groups_at, axis=axis)
return group_sums
| mit |
cogstat/cogstat | cogstat/cogstat_gui.py | 1 | 45612 | # -*- coding: utf-8 -*-
"""
GUI for CogStat.
"""
# Splash screen
import os
import sys
import importlib
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt
app = QtWidgets.QApplication(sys.argv)
pixmap = QtGui.QPixmap(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources',
'CogStat splash screen.png'), 'PNG')
splash_screen = QtWidgets.QSplashScreen(pixmap)
splash_screen.show()
splash_screen.showMessage('', Qt.AlignBottom, Qt.white) # TODO find something else to make the splash visible
# go on with regular imports, etc.
from distutils.version import LooseVersion
import gettext
import logging
import os
import sys
import traceback
from urllib.request import urlopen
import webbrowser
from PyQt5 import QtCore, QtGui, QtWidgets, QtPrintSupport
from . import cogstat
from . import cogstat_dialogs
from . import cogstat_config as csc
csc.versions['cogstat'] = cogstat.__version__
from . import cogstat_util as cs_util
cs_util.app_devicePixelRatio = app.devicePixelRatio()
cs_util.get_versions()
logging.root.setLevel(logging.INFO)
importlib.reload(sys) # TODO why do we need this?
t = gettext.translation('cogstat', os.path.dirname(os.path.abspath(__file__))+'/locale/', [csc.language], fallback=True)
_ = t.gettext
rtl_lang = True if csc.language in ['he', 'fa', 'ar'] else False
broken_analysis = _('%s Oops, something went wrong, CogStat could not run the analysis. You may want to report it.') \
+ ' ' + _('Read more about how to report an issue <a href = "%s">here</a>.') \
% 'https://github.com/cogstat/cogstat/wiki/Report-a-bug'
class StatMainWindow(QtWidgets.QMainWindow):
"""
CogStat GUI.
"""
def __init__(self):
        super(StatMainWindow, self).__init__()  # TODO do we need super()?
self._init_UI()
# Check if all required components are installed
# TODO Maybe all these checking can be removed
missing_required_components, missing_recommended_components = self._check_installed_components()
if missing_required_components or missing_recommended_components:
QtWidgets.QMessageBox.critical(self, 'Incomplete installation', 'Install missing component(s): ' +
''.join([x+', ' for x in
missing_required_components+missing_recommended_components])[:-2] +
'.<br><br>' + '<a href = "https://github.com/cogstat/cogstat/wiki/'
'Installation">Visit the installation help page</a> to see how '
'to complete the installation.', QtWidgets.QMessageBox.Ok)
if missing_required_components:
sys.exit()
self.analysis_results = []
# analysis_result stores list of GuiResultPackages.
# It will be useful when we can rerun all the previous analysis in the GUI output
# At the moment no former results can be manipulated later
csc.output_type = 'gui' # For some GUI specific formatting
self.check_for_update()
# Only for testing
# self.open_file('cogstat/test/data/example_data.csv'); #self.compare_groups()
# self.open_file('cogstat/test/data/VA_test.csv')
# self.open_clipboard()
# self.print_data()
# self.explore_variable(['X'])
# self.explore_variable(['a'], freq=False)
# self.explore_variable_pair(['X', 'Y'])
# self.pivot([u'X'], row_names=[], col_names=[], page_names=[u'CONDITION', u'TIME3'], function='N')
# self.diffusion(error_name=['Error'], RT_name=['RT_sec'], participant_name=['Name'],
# condition_names=['Num1', 'Num2'])
# self.compare_variables(['X', 'Y'])
# self.compare_variables(['A', 'B', 'C1'])
# self.compare_variables(['D', 'E', 'F'])
# self.compare_variables()
# self.compare_variables(['a', 'b', 'c1', 'd', 'e', 'f', 'g', 'h'],
# factors=[['factor1', 2], ['factor2', 2], ['factor3', 2]])
# self.compare_variables([u'CONDITION', u'CONDITION2', u'CONDITION3'])
# self.compare_groups(['slope'], ['group'], ['slope_SE'], 25)
# self.compare_groups(['A'], ['G', 'H'])
# self.compare_groups(['X'], ['TIME', 'CONDITION'])
# self.compare_groups(['dep_nom'], ['g0', 'g1', 'g2', 'g3'])
# self.save_result_as()
# self.save_result_as(filename='CogStat analysis result.pdf')
def check_for_update(self):
"""Check for update, and if update is available, display a message box with the download link.
The version number is available in a plain text file, at the appropriate web address."""
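        # Note (assumed rationale, not stated in the source): LooseVersion
        # compares version components numerically, so e.g.
        # LooseVersion('2.4.1') < LooseVersion('2.10') is True, unlike a plain
        # string comparison.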
try:
latest_version = urlopen('http://kognitiv.elte.hu/cogstat/version', timeout=3).read().decode('utf-8')
if LooseVersion(cogstat.__version__) < LooseVersion(latest_version):
QtWidgets.QMessageBox.about(self, _('Update available'),
_('New version is available.') + '<br><br>' +
_('You can download the new version<br>from the <a href = "%s">CogStat '
'download page</a>.') % 'http://www.cogstat.org/download.html')
except:
print("Couldn't check for update")
def _init_UI(self):
self.resize(830, 600)
self.setWindowTitle('CogStat')
# FIXME there could be issues if the __file__ path includes unicode chars
# e.g., see pixmap = QtGui.QPixmap(os.path.join(os.path.dirname(os.path.abspath(__file__)).decode('utf-8'),
# u'resources', u'CogStat splash screen.png'), 'PNG')
self.setWindowIcon(QtGui.QIcon(os.path.dirname(os.path.abspath(__file__)) + '/resources/CogStat.ico'))
if rtl_lang:
self.setLayoutDirection(QtCore.Qt.RightToLeft)
# Menus and commands
# The list will be used to construct the menus
# Items include the icon name, the menu name, the shortcuts and the function to call, add it to the toolbar
icon_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', 'icons')
menu_commands = [
[_('&Data'),
['/icons8-folder.svg', _('&Open data file')+'...', _('Ctrl+O'), 'self.open_file',
True],
['/icons8-folder-eye.svg', _('Open d&emo data file')+'...', _('Ctrl+E'),
'self.open_demo_file',True],
['/icons8-paste.svg', _('&Paste data'), _('Ctrl+V'), 'self.open_clipboard', True],
['separator'],
['/icons8-filter.svg', _('&Filter outliers')+'...', _('Ctrl+L'),
'self.filter_outlier', True],
['separator'],
['/icons8-data-sheet.svg', _('&Display data'), _('Ctrl+D'), 'self.print_data', False],
['/icons8-data-sheet-check.svg', _('Display data &briefly'), _('Ctrl+B'),
'self._print_data_brief', True],
['toolbar separator']
],
[_('&Analysis'),
['/icons8-normal-distribution-histogram.svg', _('&Explore variable')+'...',
_('Ctrl+1'), 'self.explore_variable', True],
['/icons8-scatter-plot.svg', _('Explore relation of variable &pair')+'...',
_('Ctrl+2'), 'self.explore_variable_pair', True],
['separator'],
['/icons8-pivot-table.svg', _('Pivot &table')+'...', 'Ctrl+T', 'self.pivot', True],
['/icons8-electrical-threshold.svg', _('Behavioral data &diffusion analysis') +
'...', 'Ctrl+Shift+D', 'self.diffusion', True],
['separator'],
['/icons8-combo-chart.svg', _('Compare repeated measures va&riables')+'...',
'Ctrl+R', 'self.compare_variables', True],
['/icons8-bar-chart.svg', _('Compare &groups')+'...', 'Ctrl+G',
'self.compare_groups', True],
['toolbar separator']
],
[_('&Results'),
['/icons8-file.svg', _('&Clear results'), _('Ctrl+Del'), 'self.delete_output', True],
['/icons8-search.svg', _('&Find text'), _('Ctrl+F'), 'self.find_text', True],
['separator'],
['/icons8-zoom-in.svg', _('&Increase text size'), _('Ctrl++'), 'self.zoom_in', True],
['/icons8-zoom-out.svg', _('&Decrease text size'), _('Ctrl+-'), 'self.zoom_out',
True],
#['', _('Reset &zoom'), _('Ctrl+0'), _(''), 'self.zoom_reset'],
# TODO how can we reset to 100%?
['/icons8-edit-file.svg', _('Text is &editable'), _('Ctrl+Shift+E'),
'self.text_editable', False],
['separator'],
['/icons8-pdf.svg', _('&Save results'), _('Ctrl+P'), 'self.save_result', False],
['/icons8-pdf-edit.svg', _('Save results &as')+'...', _('Ctrl+Shift+P'),
'self.save_result_as', False],
['toolbar separator']
],
[_('&CogStat'),
['/icons8-help.svg', _('&Help'), _('F1'), 'self._open_help_webpage', True],
['/icons8-settings.svg', _('&Preferences')+'...', _('Ctrl+Shift+R'),
'self._show_preferences', True],
['/icons8-file-add.svg', _('Request a &feature'), '', 'self._open_reqfeat_webpage',
False],
['separator'],
#['/icons8-toolbar.svg', _('Show the &toolbar'), '',
# 'self.toolbar.toggleViewAction().trigger', False],
#['separator'],
['/icons8-bug.svg', _('&Report a problem'), '', 'self._open_reportbug_webpage',
False],
['/icons8-system-report.svg', _('&Diagnosis information'), '', 'self.print_versions',
False],
['separator'],
['/icons8-info.svg', _('&About'), '', 'self._show_about', False],
['separator'],
['/icons8-exit.svg', _('&Exit'), _('Ctrl+Q'), 'self.close', False]
]
]
# Enable these commands only when active_data is available
self.analysis_commands = [_('&Save data'), _('Save data &as') + '...', _('&Display data'),
_('Display data &briefly'), _('&Filter outliers') + '...', _('Pivot &table') + '...',
_('&Explore variable') + '...', _('Behavioral data &diffusion analysis') + '...',
_('Explore relation of variable &pair') + '...',
_('Compare repeated measures va&riables') + '...', _('Compare &groups') + '...',
_('&Compare groups and variables') + '...']
# Create menus and commands, create toolbar
self.menubar = self.menuBar()
self.menus = []
self.menu_commands = {}
self.toolbar_actions = {}
self.toolbar = self.addToolBar('General')
for menu in menu_commands:
self.menus.append(self.menubar.addMenu(menu[0]))
for menu_item in menu:
if isinstance(menu_item, str): # Skip the name of the main menus
continue
if menu_item[0] == 'separator':
self.menus[-1].addSeparator()
elif menu_item[0] == 'toolbar separator':
self.toolbar.addSeparator()
else:
self.menu_commands[menu_item[1]] = QtWidgets.QAction(QtGui.QIcon(icon_path + menu_item[0]),
menu_item[1], self)
self.menu_commands[menu_item[1]].setShortcut(menu_item[2])
#self.menu_commands[menu_item[1]].setStatusTip(menu_item[3])
self.menu_commands[menu_item[1]].triggered.connect(eval(menu_item[3]))
self.menus[-1].addAction(self.menu_commands[menu_item[1]])
if menu_item[4]: # if the menu item should be added to the toolbar
self.toolbar_actions[menu_item[1]] = QtWidgets.QAction(QtGui.QIcon(icon_path + menu_item[0]),
menu_item[1] + ' (' + menu_item[2] + ')',
self)
self.toolbar_actions[menu_item[1]].triggered.connect(eval(menu_item[3]))
self.toolbar.addAction(self.toolbar_actions[menu_item[1]])
self.menus[2].actions()[5].setCheckable(True) # _('&Text is editable') menu is a checkbox
# # see also text_editable()
#self.toolbar.actions()[15].setCheckable(True) # TODO rewrite Text is editable switches, because the menu and
# the toolbar works independently
#self.menus[3].actions()[4].setCheckable(True) # Show the toolbar menu is a checkbox
#self.menus[3].actions()[4].setChecked(True) # Set the default value On
# TODO if the position of these menus are changed, then this setting will not work
self._show_data_menus(on=False)
# Prepare Output pane
self.output_pane = QtWidgets.QTextBrowser() # QTextBrowser can handle links, QTextEdit cannot
self.output_pane.document().setDefaultStyleSheet('body {color:black;} h2 {color:%s;} h3 '
'{color:%s} .table_cs_pd th {font-weight:normal;}' %
(csc.mpl_theme_color_dark, csc.mpl_theme_color))
#self.output_pane.setLineWrapMode(QtWidgets.QTextEdit.NoWrap)
welcome_message = '%s%s%s%s<br>%s<br>%s<br>' % \
('<cs_h1>', _('Welcome to CogStat!'), '</cs_h1>',
_('CogStat makes statistical analysis more simple and efficient.'),
_('To start working open a data file or paste your data from a spreadsheet.'),
_('Find more information about CogStat on its <a href = "https://www.cogstat.org">webpage</a> '
'or read the <a href="https://github.com/cogstat/cogstat/wiki/Quick-Start-Tutorial">'
'quick start tutorial.</a>'))
self.output_pane.setText(cs_util.convert_output([welcome_message])[0])
self.welcome_text_on = True # Used for deleting the welcome text at the first analysis
self.output_pane.setReadOnly(True)
self.output_pane.setOpenExternalLinks(True)
self.output_pane.setStyleSheet("QTextBrowser { background-color: white; }")
# Some styles use non-white background (e.g. Linux Mint 17 Mate uses gray)
# Set default font
#print self.output_pane.currentFont().toString()
# http://stackoverflow.com/questions/2475750/using-qt-css-to-set-own-q-propertyqfont
font = QtGui.QFont()
font.setFamily(csc.default_font)
font.setPointSizeF(csc.default_font_size)
self.output_pane.setFont(font)
#print self.output_pane.currentFont().toString()
self.setCentralWidget(self.output_pane)
self.setAcceptDrops(True)
#self.statusBar().showMessage(_('Ready'))
self.unsaved_output = False # Do not want to save the output with the welcome message
self.output_filename = ''
self.show()
def _show_data_menus(self, on=True):
"""
Enable or disable data handling menus depending on whether data is loaded.
        Parameters:
            on (bool): True to enable the menus, False to disable them
                (default: True)
"""
for menu in self.analysis_commands:
try:
self.menu_commands[menu].setEnabled(on)
self.toolbar_actions[menu].setEnabled(on)
except:
pass
def dragEnterEvent(self, event):
if event.mimeData().hasFormat("text/uri-list"):
event.accept()
elif event.mimeData().hasFormat("text/plain"):
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasFormat("text/uri-list"):
self.open_file(filename=event.mimeData().urls()[0].toString(options=QtCore.QUrl.PreferLocalFile))
elif event.mimeData().hasFormat("text/plain"):
# print 'Dropped Text: ', event.mimeData().text()
self._open_data(data=str(event.mimeData().text()))
def _check_installed_components(self):
"""
Check if all required and recommended components are installed.
Return the list of missing components as strings.
"""
missing_required_components = []
missing_recommended_components = []
# Required components
for module in ['pyqt', 'numpy', 'pandas', 'scipy', 'statsmodels']:
if csc.versions[module] is None:
missing_required_components.append(module)
# Recommended components
for module in []: # At the moment it's empty
if csc.versions[module] is None:
missing_recommended_components.append(module)
'''
# Check R only on Linux, since Win doesn't have a working rpy at the moment
if sys.platform in ['linux2', 'linux']:
for module in ['r', 'rpy2', 'car']:
if csc.versions[module] is None:
missing_recommended_components.append(module)
'''
if missing_required_components:
logging.error('Missing required components: %s' % missing_required_components)
if missing_recommended_components:
logging.error('Missing recommended components: %s' % missing_recommended_components)
return missing_required_components, missing_recommended_components
def _busy_signal(self, on):
"""
Changes the mouse, signalling that the system is busy
"""
# http://qt-project.org/doc/qt-4.7/qt.html see CursorShape
# http://qt-project.org/doc/qt-4.7/qapplication.html#id-19f00dae-ec43-493e-824c-ef07ce96d4c6
if on:
QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
#QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.BusyCursor))
else:
while QtWidgets.QApplication.overrideCursor() is not None:
# TODO if for some reason (unhandled exception) the cursor was not set back formerly,
# then next time set it back
# FIXME exception handling should solve this problem on the long term
QtWidgets.QApplication.restoreOverrideCursor()
#QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
def _print_to_output_pane(self, index=-1):
"""Print a GuiResultPackage to GUI output pane
:param index: index of the item in self.analysis_results to be printed
If no index is given, the last item is printed.
"""
if self.welcome_text_on:
self.output_pane.clear()
#self.output_pane.setHtml(cs_util.convert_output(['<cs_h1> </cs_h1>'])[0])
self.welcome_text_on = False
#self.output_pane.append('<h2>test2</h2>testt<h3>test3</h3>testt<br>testpbr')
#self.output_pane.append('<h2>test2</h2>testt<h3>test3</h3>testt<br>testpbr')
#print(self.output_pane.toHtml())
for output in self.analysis_results[index].output:
if isinstance(output, str):
self.output_pane.append(output) # insertHtml() messes up the html doc,
# check it with self.output_pane.toHtml()
elif isinstance(output, QtGui.QImage):
self.output_pane.moveCursor(11, 0) # Moves cursor to the end
self.output_pane.textCursor().insertImage(output)
elif output is None:
pass # We simply don't do anything with None-s
else:
logging.error('Unknown output type: %s' % type(output))
self.unsaved_output = True
### Data menu methods ###
def open_file(self, filename=''):
"""Open file.
:param filename: filename with path
"""
if filename in ['', False]:
filename = cogstat_dialogs.open_data_file()
#print(filename)
if filename:
self._open_data(str(filename))
    def open_demo_file(self, filename=''):
        """Open demo data file.
:param filename: filename with path
"""
if filename in ['', False]:
filename = cogstat_dialogs.open_demo_data_file()
#print(filename)
if filename:
self._open_data(str(filename))
def open_clipboard(self):
"""Open data copied to clipboard."""
clipboard = QtWidgets.QApplication.clipboard()
if clipboard.mimeData().hasFormat("text/plain"):
self._open_data(str(clipboard.text()))
def _open_data(self, data):
""" Core of the import process.
"""
self._busy_signal(True)
try:
self.active_data = cogstat.CogStatData(data=data)
if self.active_data.import_source == _('Import failed'):
QtWidgets.QMessageBox.warning(self, _('Import error'), _('Data could not be loaded.'),
QtWidgets.QMessageBox.Ok)
self._show_data_menus(False)
else:
self._show_data_menus()
'''
self.statusBar().showMessage((_('Data loaded from file: ') if self.active_data.import_source[:9] in
['text file', 'SPSS file'] else _('Data loaded from clipboard: ')) + _('%s variables and %s cases.') %
(len(self.active_data.data_frame.columns), len(self.active_data.data_frame.index)))
'''
self.print_data(brief=True, display_import_message=True)
except Exception as e:
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self._open_data()') # TODO
try:
file_content = '<br>' + _('Data file content') + ':<br>' + open(data, 'r').read()[:1000].replace('\n', '<br>') if os.path.exists(data) else ''
except:
file_content = ''
self.analysis_results[-1].\
add_output(cs_util.reformat_output('<cs_h1>' + _('Data') + '</cs_h1>' +
_('Oops, something went wrong, CogStat could not open the '
'data. You may want to report the issue.') + ' ' +
_('Read more about how to report an issue <a href = "%s">here</a>.')
% 'https://github.com/cogstat/cogstat/wiki/Report-a-bug') +
'<br><br>' + _('Error code') + ': %s' %e +
'<br><br>' + _('Data to be imported') +
':<br>%s<br>%s' % (data, file_content))
traceback.print_exc()
self._print_to_output_pane()
self._busy_signal(False)
def filter_outlier(self, var_names=None):
"""Filter outliers.
Arguments:
var_names (list): variable names
"""
if not var_names:
try:
self.dial_filter
except:
# Only interval variables can be used for filtering
names = [name for name in self.active_data.data_frame.columns if (self.active_data.data_measlevs[name]
in ['int', 'unk'])]
self.dial_filter = cogstat_dialogs.filter_outlier(names=names)
else: # TODO is it not necessary anymore? For all dialogs
# Only interval variables can be used for filtering
names = [name for name in self.active_data.data_frame.columns if (self.active_data.data_measlevs[name]
in ['int', 'unk'])]
self.dial_filter.init_vars(names=names)
if self.dial_filter.exec_():
var_names = self.dial_filter.read_parameters()
else:
return
self._busy_signal(True)
try:
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.filter_outlier()') # TODO
result = self.active_data.filter_outlier(var_names)
self.analysis_results[-1].add_output(result)
self._print_to_output_pane()
except:
self.analysis_results[-1].add_output(cs_util.reformat_output(broken_analysis % _('Filter outlier.')))
traceback.print_exc()
self._print_to_output_pane()
self._busy_signal(False)
def print_data(self, brief=False, display_import_message=False):
"""Print the current data to the output.
:param brief (bool): print only the first 10 rows
:param display_import_message (bool):
"""
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.print_data') # TODO commands will be used to rerun the analysis
self.analysis_results[-1].add_output(self.active_data.print_data(brief=brief))
if self.active_data.import_message and display_import_message:
self.analysis_results[-1].add_output(cs_util.reformat_output(self.active_data.import_message))
self._print_to_output_pane()
def _print_data_brief(self):
self.print_data(brief=True)
### Analysis menu methods ###
def explore_variable(self, var_names=None, freq=True, dist=True, descr=True, norm=True, loc_test=True,
loc_test_value=0):
"""Computes various properties of variables.
Arguments:
var_names (list): variable names
freq (bool): compute frequencies (default True)
dist (bool): compute distribution (default True)
descr (bool): compute descriptive statistics (default True)
norm (bool): check normality (default True)
loc_test (bool): test location (e.g. t-test) (default True)
loc_test_value (numeric): test location against this value (default 0.0)
"""
if not var_names:
try:
self.dial_var_prop
except:
self.dial_var_prop = cogstat_dialogs.explore_var_dialog(names=self.active_data.data_frame.columns)
else: # TODO is it not necessary anymore? For all dialogs
self.dial_var_prop.init_vars(names=self.active_data.data_frame.columns)
if self.dial_var_prop.exec_():
var_names, freq, loc_test_value = self.dial_var_prop.read_parameters()
else:
return
self._busy_signal(True)
try:
for var_name in var_names:
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.explore_variable()') # TODO
result = self.active_data.explore_variable(var_name, frequencies=freq, central_value=loc_test_value)
self.analysis_results[-1].add_output(result)
self._print_to_output_pane()
except:
self.analysis_results[-1].add_output(cs_util.reformat_output(broken_analysis % _('Explore variable.')))
traceback.print_exc()
self._print_to_output_pane()
self._busy_signal(False)
def explore_variable_pair(self, var_names=None, xlims=[None, None], ylims=[None, None]):
"""Explore variable pairs.
Arguments:
var_names (list): variable names
"""
if not var_names:
try:
self.dial_var_pair
except:
self.dial_var_pair = cogstat_dialogs.explore_var_pairs_dialog(names=self.active_data.data_frame.columns)
else:
self.dial_var_pair.init_vars(names=self.active_data.data_frame.columns)
if self.dial_var_pair.exec_():
var_names, xlims, ylims = self.dial_var_pair.read_parameters()
else:
return
self._busy_signal(True)
if len(var_names) < 2: # TODO this check should go to the appropriate dialog
self.analysis_results.append(GuiResultPackage())
text_result = cs_util.reformat_output('%s %s' % (_('Explore variable pair.'),
_('At least two variables should be set.')))
self.analysis_results[-1].add_output(text_result)
else:
try:
for x in var_names:
pass_diag = False
for y in var_names:
if pass_diag:
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.explore_variable_pair') # TODO
result_list = self.active_data.explore_variable_pair(x, y, xlims, ylims)
self.analysis_results[-1].add_output(result_list)
self._print_to_output_pane()
if x == y:
pass_diag = True
except:
self.analysis_results[-1].add_output(cs_util.reformat_output(broken_analysis %
_('Explore variable pair.')))
traceback.print_exc()
self._print_to_output_pane()
self._busy_signal(False)
def pivot(self, depend_names=None, row_names=[], col_names=[], page_names=[], function='Mean'):
"""Build a pivot table.
Arguments:
depend_names (list of str): name of the dependent variable
row_names, col_names, page_names (lists of str): name of the independent variables
function (str): available functions: N,Sum, Mean, Median, Standard Deviation, Variance (default Mean)
"""
if not depend_names:
try:
self.dial_pivot
except:
self.dial_pivot = cogstat_dialogs.pivot_dialog(names=self.active_data.data_frame.columns)
else:
self.dial_pivot.init_vars(names=self.active_data.data_frame.columns)
if self.dial_pivot.exec_():
row_names, col_names, page_names, depend_names, function = self.dial_pivot.read_parameters()
else:
return
self._busy_signal(True)
self.analysis_results.append(GuiResultPackage())
if not depend_names or not (row_names or col_names or page_names): # TODO this check should go to the dialog
text_result = cs_util.reformat_output('%s %s' % (_('Pivot table.'),
_('The dependent variable and at least one grouping '
'variable should be given.')))
else:
try:
text_result = self.active_data.pivot(depend_names, row_names, col_names, page_names, function)
except:
text_result = cs_util.reformat_output(broken_analysis % _('Pivot table.'))
traceback.print_exc()
self.analysis_results[-1].add_output(text_result)
self._print_to_output_pane()
self._busy_signal(False)
def diffusion(self, error_name=[], RT_name=[], participant_name=[], condition_names=[]):
"""Run a diffusion analysis on behavioral data.
Arguments:
RT_name, error name, participant_name (lists of str): name of the variables
condition_names (lists of str): name of the condition(s) variables
"""
if not RT_name:
try:
self.dial_diffusion
except:
self.dial_diffusion = cogstat_dialogs.diffusion_dialog(names=self.active_data.data_frame.columns)
else:
self.dial_diffusion.init_vars(names=self.active_data.data_frame.columns)
if self.dial_diffusion.exec_():
error_name, RT_name, participant_name, condition_names = self.dial_diffusion.read_parameters()
else:
return
self._busy_signal(True)
self.analysis_results.append(GuiResultPackage())
if (not RT_name) or (not error_name): # TODO this check should go to the dialog
text_result = cs_util.reformat_output('%s %s' % (
_('Diffusion analysis.'), _('At least the reaction time and the error variables should be given.')))
else:
try:
text_result = self.active_data.diffusion(error_name, RT_name, participant_name, condition_names)
except:
text_result = cs_util.reformat_output(broken_analysis % _('Diffusion analysis.'))
traceback.print_exc()
self.analysis_results[-1].add_output(text_result)
self._print_to_output_pane()
self._busy_signal(False)
def compare_variables(self, var_names=None, factors=[], ylims=[None, None]):
"""Compare variables.
Arguments:
var_names (list): variable names
"""
if not var_names:
try:
self.dial_comp_var
except:
self.dial_comp_var = cogstat_dialogs.compare_vars_dialog(names=self.active_data.data_frame.columns)
else:
self.dial_comp_var.init_vars(names=self.active_data.data_frame.columns)
if self.dial_comp_var.exec_():
var_names, factors, ylims = self.dial_comp_var.read_parameters() # TODO check if settings are
# appropriate
else:
return
self._busy_signal(True)
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.compare_variables()') # TODO
if len(factors) == 1:
factors = [] # ignore single factor
if len(var_names) < 2:
text_result = cs_util.reformat_output('%s %s' %
(_('Compare variables.'), _('At least two variables should be set.')))
self.analysis_results[-1].add_output(text_result)
else:
try:
result_list = self.active_data.compare_variables(var_names, factors, ylims)
for result in result_list: # TODO is this a list of lists? Can we remove the loop?
self.analysis_results[-1].add_output(result)
except:
self.analysis_results[-1].add_output(cs_util.reformat_output(broken_analysis % _('Compare variables.')))
traceback.print_exc()
self._print_to_output_pane()
self._busy_signal(False)
def compare_groups(self, var_names=None, groups=None, single_case_slope_SE=None, single_case_slope_trial_n=None,
ylims=[None, None]):
"""Compare groups.
Arguments:
var_names (list): dependent variable names
groups (list): grouping variable names
"""
if not var_names:
try:
self.dial_comp_grp
except:
self.dial_comp_grp = cogstat_dialogs.compare_groups_dialog(names=self.active_data.data_frame.columns)
else:
self.dial_comp_grp.init_vars(names=self.active_data.data_frame.columns)
if self.dial_comp_grp.exec_():
var_names, groups, single_case_slope_SE, single_case_slope_trial_n, ylims = self.dial_comp_grp.\
read_parameters() # TODO check if settings are appropriate
else:
return
self._busy_signal(True)
if not var_names or not groups:
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.compare_groups()') # TODO
text_result = cs_util.reformat_output('%s %s' % (_('Compare groups.'),
_('Both the dependent and the grouping variables should '
'be set.')))
self.analysis_results[-1].add_output(text_result)
else:
for var_name in var_names:
try:
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_command('self.compare_groups()') # TODO
result_list = self.active_data.compare_groups(var_name, groups, single_case_slope_SE,
single_case_slope_trial_n, ylims)
self.analysis_results[-1].add_output(result_list)
self._print_to_output_pane()
except:
self.analysis_results[-1].add_output(cs_util.reformat_output(broken_analysis %
_('Compare groups.')))
traceback.print_exc()
self._print_to_output_pane()
self._busy_signal(False)
### Result menu methods ###
def delete_output(self):
reply = QtWidgets.QMessageBox.question(self, _('Clear output'),
_('Are you sure you want to delete the output?'),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
self.output_pane.clear()
self.analysis_results = []
self.unsaved_output = False # Not necessary to save the empty output
def find_text(self):
self.dial_find_text = cogstat_dialogs.find_text_dialog(output_pane=self.output_pane)
self.dial_find_text.exec_()
def zoom_in(self):
self.output_pane.zoomIn(1)
def zoom_out(self):
self.output_pane.zoomOut(1)
def text_editable(self):
self.output_pane.setReadOnly(not(self.menus[2].actions()[5].isChecked())) # see also _init_UI
#self.output_pane.setReadOnly(not(self.toolbar.actions()[15].isChecked()))
# TODO if the position of this menu is changed, then this function will not work
# TODO rewrite Text is editable switches, because the menu and the toolbar works independently
def save_result(self):
"""Save the output pane to pdf file."""
if self.output_filename == '':
self.save_result_as()
else:
pdf_printer = QtPrintSupport.QPrinter()
pdf_printer.setOutputFormat(QtPrintSupport.QPrinter.PdfFormat)
pdf_printer.setColorMode(QtPrintSupport.QPrinter.Color)
pdf_printer.setOutputFileName(self.output_filename)
self.output_pane.print_(pdf_printer)
self.unsaved_output = False
def save_result_as(self, filename=None):
"""Save the output pane to pdf file.
Arguments:
filename (str): name of the file to save to
"""
if not filename:
filename = cogstat_dialogs.save_output()
self.output_filename = filename
if filename:
pdf_printer = QtPrintSupport.QPrinter()
pdf_printer.setOutputFormat(QtPrintSupport.QPrinter.PdfFormat)
pdf_printer.setOutputFileName(self.output_filename)
self.output_pane.print_(pdf_printer)
self.unsaved_output = False
### Cogstat menu methods ###
def _open_help_webpage(self):
webbrowser.open('https://github.com/cogstat/cogstat/wiki')
def _show_preferences(self):
try:
self.dial_pref
except:
self.dial_pref = cogstat_dialogs.preferences_dialog()
self.dial_pref.exec_()
def _open_reqfeat_webpage(self):
webbrowser.open('https://github.com/cogstat/cogstat/wiki/Suggest-a-new-feature')
def _open_reportbug_webpage(self):
webbrowser.open('https://github.com/cogstat/cogstat/wiki/Report-a-bug')
def _show_about(self):
QtWidgets.QMessageBox.about(self, _('About CogStat ') + csc.versions['cogstat'], 'CogStat ' +
csc.versions['cogstat'] + ('<br>%s<br><br>Copyright © %s-%s Attila Krajcsi<br><br>'
'<a href = "http://www.cogstat.org">%s</a>' %
(_('Simple automatic data analysis software'),
2012, 2021, _('Visit CogStat website'))))
def print_versions(self):
"""Print the versions of the software components CogStat uses."""
# Intentionally not localized.
self._busy_signal(True)
text_output = cs_util.reformat_output(cs_util.print_versions(self))
self.analysis_results.append(GuiResultPackage())
self.analysis_results[-1].add_output(cs_util.convert_output(['<cs_h1>' + _('System components') + '</cs_h1>'])
[0])
self.analysis_results[-1].add_output(text_output)
self._print_to_output_pane()
self._busy_signal(False)
def closeEvent(self, event):
# Override the close behavior, otherwise alt+F4 quits unconditionally.
# http://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
# Check if everything is saved
tosave = True
while self.unsaved_output and tosave:
reply = QtWidgets.QMessageBox.question(self, _('Save output'),
_('Output has unsaved results. Do you want to save it?'), QtWidgets.QMessageBox.Yes |
QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
if reply == QtWidgets.QMessageBox.Yes:
self.save_result()
else:
tosave = False
"""
reply = QtGui.QMessageBox.question(self, _('Confirm exit'),
_('Are you sure you want to exit the program?'), QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
QtGui.qApp.quit()
else:
event.ignore()
"""
class GuiResultPackage():
""" A class for storing a package of results.
Result object includes:
- self.command: Command to run (python code) - not used yet
- self.output:
- list of strings (html) or figures (QImages)
- the first item is recommended to be the title line
"""
def __init__(self):
self.command = []
self.output = []
def add_command(self, command):
self.command.append(command)
def add_output(self, output):
"""Add output to the self.output
:param output: item or list of items to add
"""
if isinstance(output, list):
for outp in output:
self.output.append(outp)
else:
self.output.append(output)
def main():
splash_screen.close()
ex = StatMainWindow()
sys.exit(app.exec_())
| gpl-3.0 |
djevans071/Rebalancing-Citibike | modeling.py | 1 | 3212 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 07:32:49 2017
@author: psamtik071
"""
# routines for further feature creation for modeling purposes
from matplotlib import pyplot as plt
#import seaborn as sns
from workflow.data import *
from workflow.features import *
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
#from sqlalchemy_utils import database_exists, create_database
import psycopg2
# connect to SQL database
username = 'psam071'
host = 'localhost'
dbname = 'citibike'
db = create_engine('postgres://%s@%s/%s' % (username, host, dbname))
con = None
con = psycopg2.connect(database = dbname, user = username, host = host)
print "Querying Database..."
#get stations for 2015 to work on
# query_stations2015 = """
# SELECT DISTINCT a.id
# FROM features a
# LEFT JOIN stations b ON a.id = b.id
# WHERE a.date = '2015-03-01' AND tot_docks > 0
# ORDER BY a.id;
# """
stations_2015 = pd.read_pickle('websitetools/stations.pickle')
def cleanup(year):
# clean-up dataset further and introduce new features from new_features module
df = pd.read_sql_query(bulk_query(year),con)
df = strip_unused_stations(df, stations_2015.id.unique())
df = new_features(df)
df.date = pd.to_datetime(df.date)
return df
index_col = ['date']
data_cols = ['id', 'long', 'lat', 'hour', 'dayofweek',
'month', 'is_weekday', 'is_holiday',
'precip', 'temp', 'pct_avail_bikes', 'pct_avail_docks']
df2015 = cleanup(2015)
df2016 = cleanup(2016)
# ------------ MODELING -------------------
print "Training predictor..."
# train model
from sklearn.ensemble import RandomForestRegressor
# data = df[data_cols + hist_cols].sort_index()
# target = df.flux_type
# X_train, X_test, y_train, y_test = train_test_split(data, target,
# train_size = 0.75, test_size = 0.25)
target_label = 'pct_flux'
X_train = df2015[data_cols]
y_train = df2015[target_label]
X_test = df2016[data_cols]
y_test = df2016[target_label]
reg = RandomForestRegressor(min_samples_leaf=16, min_samples_split=6,
max_features = 0.95, n_jobs=-1)
reg.fit(X_train, y_train)
pred = reg.predict(X_test)
print "Saving Data..."
# ------------- SAVE DATA TO PICKLE --------------
import pickle
PICKLE_FILENAME = "regressor.pkl"
DATASET_PICKLE_FILENAME = "dataset.pkl"
FEATURE_LIST_FILENAME = "feature_list.pkl"
def dump_model_and_data(clf, dataset, feature_list):
with open(PICKLE_FILENAME, "w") as reg_outfile:
pickle.dump(reg, reg_outfile)
with open(DATASET_PICKLE_FILENAME, "w") as dataset_outfile:
pickle.dump(dataset, dataset_outfile)
with open(FEATURE_LIST_FILENAME, "w") as featurelist_outfile:
pickle.dump(feature_list, featurelist_outfile)
def load_model_and_data():
with open(PICKLE_FILENAME, "r") as reg_infile:
reg = pickle.load(reg_infile)
with open(DATASET_PICKLE_FILENAME, "r") as dataset_infile:
dataset = pickle.load(dataset_infile)
with open(FEATURE_LIST_FILENAME, "r") as featurelist_infile:
feature_list = pickle.load(featurelist_infile)
return reg, dataset, feature_list
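# Illustrative sketch (assumed follow-up, kept as comments so the script's
# behaviour is unchanged): the pickled artifacts can later be reloaded with
#     reg, dataset, feature_list = load_model_and_data()
#     predictions = reg.predict(dataset[feature_list])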
dump_model_and_data(reg, df2015, data_cols)
| mit |
HarllanAndrye/nilmtk | nilmtk/elecmeter.py | 5 | 30305 | from __future__ import print_function, division
from warnings import warn
from collections import namedtuple
from copy import deepcopy
from itertools import izip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from .preprocessing import Clip
from .stats import TotalEnergy, GoodSections, DropoutRate
from .stats.totalenergyresults import TotalEnergyResults
from .hashable import Hashable
from .appliance import Appliance
from .datastore import Key
from .measurement import (select_best_ac_type, AC_TYPES, PHYSICAL_QUANTITIES,
PHYSICAL_QUANTITIES_WITH_AC_TYPES,
check_ac_type, check_physical_quantity)
from .node import Node
from .electric import Electric
from .timeframe import TimeFrame, list_of_timeframe_dicts
from nilmtk.exceptions import MeasurementError
from .utils import flatten_2d_list, capitalise_first_letter
from nilmtk.timeframegroup import TimeFrameGroup
import nilmtk
ElecMeterID = namedtuple('ElecMeterID', ['instance', 'building', 'dataset'])
class ElecMeter(Hashable, Electric):
"""Represents a physical electricity meter.
Attributes
----------
appliances : list of Appliance objects connected immediately downstream
of this meter. Will be [] if no appliances are connected directly
to this meter.
store : nilmtk.DataStore
key : string
key into nilmtk.DataStore to access data.
metadata : dict.
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#elecmeter
STATIC ATTRIBUTES
-----------------
meter_devices : dict, static class attribute
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#meterdevice
"""
meter_devices = {}
def __init__(self, store=None, metadata=None, meter_id=None):
# Store and check parameters
self.appliances = []
self.metadata = {} if metadata is None else metadata
assert isinstance(self.metadata, dict)
self.store = store
self.identifier = meter_id
# Insert self into nilmtk.global_meter_group
if self.identifier is not None:
assert isinstance(self.identifier, ElecMeterID)
if self not in nilmtk.global_meter_group.meters:
nilmtk.global_meter_group.meters.append(self)
@property
def key(self):
return self.metadata['data_location']
def instance(self):
return self._identifier_attr('instance')
def building(self):
return self._identifier_attr('building')
def dataset(self):
return self._identifier_attr('dataset')
@property
def name(self):
return self.metadata.get('name')
@name.setter
def name(self, value):
self.metadata['name'] = value
def _identifier_attr(self, attr):
if self.identifier is None:
return
else:
return getattr(self.identifier, attr)
def get_timeframe(self):
self._check_store()
return self.store.get_timeframe(key=self.key)
def _check_store(self):
if self.store is None:
raise RuntimeError("ElecMeter needs `store` attribute set to an"
" instance of a `nilmtk.DataStore` subclass")
def upstream_meter(self, raise_warning=True):
"""
Returns
-------
ElecMeterID of upstream meter or None if is site meter.
"""
if self.is_site_meter():
if raise_warning:
warn("There is no meter upstream of this meter '{}' because"
" it is a site meter.".format(self.identifier))
return
submeter_of = self.metadata.get('submeter_of')
# Sanity checks
if submeter_of is None:
raise ValueError(
"This meter has no 'submeter_of' metadata attribute.")
if submeter_of < 0:
raise ValueError("'submeter_of' must be >= 0.")
upstream_meter_in_building = self.metadata.get(
'upstream_meter_in_building')
if (upstream_meter_in_building is not None and
upstream_meter_in_building != self.identifier.building):
raise NotImplementedError(
"'upstream_meter_in_building' not implemented yet.")
id_of_upstream = ElecMeterID(instance=submeter_of,
building=self.identifier.building,
dataset=self.identifier.dataset)
upstream_meter = nilmtk.global_meter_group[id_of_upstream]
if upstream_meter is None:
warn("No upstream meter found for '{}'.".format(self.identifier))
return upstream_meter
@classmethod
def load_meter_devices(cls, store):
dataset_metadata = store.load_metadata('/')
ElecMeter.meter_devices.update(
dataset_metadata.get('meter_devices', {}))
def save(self, destination, key):
"""
Convert all relevant attributes to a dict to be
saved as metadata in destination at location specified
by key
"""
# destination.write_metadata(key, self.metadata)
# then save data
raise NotImplementedError
@property
def device(self):
"""
Returns
-------
dict describing the MeterDevice for this meter (sample period etc).
"""
device_model = self.metadata.get('device_model')
if device_model:
return deepcopy(ElecMeter.meter_devices[device_model])
else:
return {}
def sample_period(self):
device = self.device
if device:
return device['sample_period']
def is_site_meter(self):
return self.metadata.get('site_meter', False)
def dominant_appliance(self):
"""Tries to find the most dominant appliance on this meter,
and then returns that appliance object. Will return None
if there are no appliances on this meter.
"""
n_appliances = len(self.appliances)
if n_appliances == 0:
return
elif n_appliances == 1:
return self.appliances[0]
else:
for app in self.appliances:
if app.metadata.get('dominant_appliance'):
return app
            warn('Multiple appliances are associated with meter {}'
                 ' but none are marked as the dominant appliance. Hence'
                 ' returning the first appliance in the list.'
                 .format(self.identifier), RuntimeWarning)
return self.appliances[0]
def label(self, pretty=True):
"""Returns a string describing this meter.
Parameters
----------
pretty : boolean
If True then just return the type name of the dominant appliance
(without the instance number) or metadata['name'], with the
first letter capitalised.
Returns
-------
string : A label listing all the appliance types.
"""
if pretty:
return self._pretty_label()
meter_names = []
if self.is_site_meter():
meter_names.append('SITE METER')
elif "name" in self.metadata:
meter_names.append(self.metadata["name"])
else:
for appliance in self.appliances:
appliance_name = appliance.label()
if appliance.metadata.get('dominant_appliance'):
appliance_name = appliance_name.upper()
meter_names.append(appliance_name)
label = ", ".join(meter_names)
return label
def _pretty_label(self):
name = self.metadata.get("name")
if name:
label = name
elif self.is_site_meter():
label = 'Site meter'
elif self.dominant_appliance() is not None:
label = self.dominant_appliance().identifier.type
else:
meter_names = []
for appliance in self.appliances:
appliance_name = appliance.label()
if appliance.metadata.get('dominant_appliance'):
appliance_name = appliance_name.upper()
meter_names.append(appliance_name)
label = ", ".join(meter_names)
return label
label = capitalise_first_letter(label)
return label
def available_ac_types(self, physical_quantity):
"""Finds available alternating current types for a specific physical quantity.
Parameters
----------
physical_quantity : str or list of strings
Returns
-------
list of strings e.g. ['apparent', 'active']
"""
if isinstance(physical_quantity, list):
ac_types = [self.available_ac_types(pq) for pq in physical_quantity]
return list(set(flatten_2d_list(ac_types)))
if physical_quantity not in PHYSICAL_QUANTITIES:
            raise ValueError("`physical_quantity` must be one of '{}', not '{}'"
.format(PHYSICAL_QUANTITIES, physical_quantity))
measurements = self.device['measurements']
return [m['type'] for m in measurements
if m['physical_quantity'] == physical_quantity
and 'type' in m]
def available_physical_quantities(self):
"""
Returns
-------
list of strings e.g. ['power', 'energy']
"""
measurements = self.device['measurements']
return list(set([m['physical_quantity'] for m in measurements]))
def available_columns(self):
"""
Returns
-------
list of 2-tuples of strings e.g. [('power', 'active')]
"""
measurements = self.device['measurements']
return list(set([(m['physical_quantity'], m.get('type', ''))
for m in measurements]))
def __repr__(self):
string = super(ElecMeter, self).__repr__()
# Now add list of appliances...
string = string[:-1] # remove last bracket
# Site meter
if self.metadata.get('site_meter'):
string += ', site_meter'
# Appliances
string += ', appliances={}'.format(self.appliances)
# METER ROOM
room = self.metadata.get('room')
if room:
string += ', room={}'.format(room)
string += ')'
return string
def matches(self, key):
"""
Parameters
----------
key : dict
Returns
-------
Bool
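        Examples
        --------
        Illustrative sketch, assuming `meter` is an ElecMeter (the key/value
        pairs are only examples):
        >>> meter.matches({'building': 1, 'dataset': 'REDD'})  # -> True/False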
"""
if not key:
return True
if not isinstance(key, dict):
raise TypeError()
match = True
for k, v in key.iteritems():
if hasattr(self.identifier, k):
if getattr(self.identifier, k) != v:
match = False
elif k in self.metadata:
if self.metadata[k] != v:
match = False
elif k in self.device:
metadata_value = self.device[k]
if (isinstance(metadata_value, list) and
not isinstance(v, list)):
if v not in metadata_value:
match = False
elif metadata_value != v:
match = False
else:
raise KeyError("'{}' not a valid key.".format(k))
return match
def load(self, **kwargs):
"""Returns a generator of DataFrames loaded from the DataStore.
By default, `load` will load all available columns from the DataStore.
Specific columns can be selected in one or two mutually exclusive ways:
1. specify a list of column names using the `cols` parameter.
2. specify a `physical_quantity` and/or an `ac_type` parameter to ask
`load` to automatically select columns.
        If `resample` is set to True then, by default, gaps shorter than
        max_sample_period will be forward filled.
Parameters
---------------
physical_quantity : string or list of strings
e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy'].
If a single string then load columns only for that physical quantity.
If a list of strings then load columns for all those physical
quantities.
ac_type : string or list of strings, defaults to None
Where 'ac_type' is short for 'alternating current type'. e.g.
'reactive' or 'active' or 'apparent'.
If set to None then will load all AC types per physical quantity.
If set to 'best' then load the single best AC type per
physical quantity.
If set to a single AC type then load just that single AC type per
physical quantity, else raise an Exception.
If set to a list of AC type strings then will load all those
AC types and will raise an Exception if any cannot be found.
cols : list of tuples, using NILMTK's vocabulary for measurements.
e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')]
`cols` can't be used if `ac_type` and/or `physical_quantity` are set.
sample_period : int, defaults to None
Number of seconds to use as the new sample period for resampling.
If None then will use self.sample_period()
resample : boolean, defaults to False
If True then will resample data using `sample_period`.
Defaults to True if `sample_period` is not None.
resample_kwargs : dict of key word arguments (other than 'rule') to
`pass to pd.DataFrame.resample()`. Defaults to set 'limit' to
`sample_period / max_sample_period` and sets 'fill_method' to ffill.
preprocessing : list of Node subclass instances
e.g. [Clip()].
**kwargs : any other key word arguments to pass to `self.store.load()`
Returns
-------
Always return a generator of DataFrames (even if it only has a single
column).
Raises
------
nilmtk.exceptions.MeasurementError if a measurement is specified
which is not available.
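        Examples
        --------
        A minimal illustrative sketch, assuming `meter` is an ElecMeter backed
        by a loaded DataStore:
        >>> for chunk in meter.load(physical_quantity='power', ac_type='active',
        ...                         sample_period=60):
        ...     print(chunk.head())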
"""
verbose = kwargs.get('verbose')
if verbose:
print()
print("ElecMeter.load")
print(self)
if 'sample_period' in kwargs:
kwargs['resample'] = True
if kwargs.get('resample'):
# Set default key word arguments for resampling.
resample_kwargs = kwargs.setdefault('resample_kwargs', {})
resample_kwargs.setdefault('fill_method', 'ffill')
if 'limit' not in resample_kwargs:
sample_period = kwargs.get('sample_period', self.sample_period())
max_number_of_rows_to_ffill = int(
np.ceil(self.device['max_sample_period'] / sample_period))
resample_kwargs.update({'limit': max_number_of_rows_to_ffill})
if verbose:
print("kwargs after setting resample setting:")
print(kwargs)
kwargs = self._prep_kwargs_for_sample_period_and_resample(**kwargs)
if verbose:
print("kwargs after processing")
print(kwargs)
# Get source node
preprocessing = kwargs.pop('preprocessing', [])
last_node = self.get_source_node(**kwargs)
generator = last_node.generator
# Connect together all preprocessing nodes
for node in preprocessing:
node.upstream = last_node
last_node = node
generator = last_node.process()
return generator
def _ac_type_to_columns(self, ac_type):
if ac_type is None:
return []
if isinstance(ac_type, list):
cols2d = [self._ac_type_to_columns(a_t) for a_t in ac_type]
return list(set(flatten_2d_list(cols2d)))
check_ac_type(ac_type)
cols_matching = [col for col in self.available_columns()
if col[1] == ac_type]
return cols_matching
def _physical_quantity_to_columns(self, physical_quantity):
if physical_quantity is None:
return []
if isinstance(physical_quantity, list):
cols2d = [self._physical_quantity_to_columns(p_q)
for p_q in physical_quantity]
return list(set(flatten_2d_list(cols2d)))
check_physical_quantity(physical_quantity)
cols_matching = [col for col in self.available_columns()
if col[0] == physical_quantity]
return cols_matching
def _get_columns_with_best_ac_type(self, physical_quantity=None):
if physical_quantity is None:
physical_quantity = self.available_physical_quantities()
if isinstance(physical_quantity, list):
columns = set()
for pq in physical_quantity:
best = self._get_columns_with_best_ac_type(pq)
if best:
columns.update(best)
return list(columns)
check_physical_quantity(physical_quantity)
available_pqs = self.available_physical_quantities()
if physical_quantity not in available_pqs:
return []
ac_types = self.available_ac_types(physical_quantity)
try:
best_ac_type = select_best_ac_type(ac_types)
except KeyError:
return []
else:
return [(physical_quantity, best_ac_type)]
def _convert_physical_quantity_and_ac_type_to_cols(
self, physical_quantity=None, ac_type=None, cols=None,
**kwargs):
"""Returns kwargs dict with physical_quantity and ac_type removed
and cols populated appropriately."""
if cols:
if (ac_type or physical_quantity):
raise ValueError("Cannot use `ac_type` and/or `physical_quantity`"
" with `cols` parameter.")
else:
if set(cols).issubset(self.available_columns()):
kwargs['cols'] = cols
return kwargs
else:
msg = ("'{}' is not a subset of the available columns: '{}'"
.format(cols, self.available_columns()))
raise MeasurementError(msg)
msg = ""
if not (ac_type or physical_quantity):
cols = self.available_columns()
elif ac_type == 'best':
cols = self._get_columns_with_best_ac_type(physical_quantity)
if not cols:
msg += "No AC types for physical quantity {}".format(physical_quantity)
else:
if ac_type:
cols = self._ac_type_to_columns(ac_type)
if not cols:
msg += "AC type '{}' not available. ".format(ac_type)
if physical_quantity:
cols_matching_pq = self._physical_quantity_to_columns(physical_quantity)
if not cols_matching_pq:
msg += ("Physical quantity '{}' not available. "
.format(physical_quantity))
if cols:
cols = list(set(cols).intersection(cols_matching_pq))
if not cols:
msg += ("No measurement matching ({}, {}). "
.format(physical_quantity, ac_type))
else:
cols = cols_matching_pq
if msg:
msg += "Available columns = {}. ".format(self.available_columns())
raise MeasurementError(msg)
kwargs['cols'] = cols
return kwargs
def dry_run_metadata(self):
return self.metadata
def get_metadata(self):
return self.metadata
def get_source_node(self, **loader_kwargs):
if self.store is None:
raise RuntimeError(
"Cannot get source node if meter.store is None!")
loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs)
generator = self.store.load(key=self.key, **loader_kwargs)
self.metadata['device'] = self.device
return Node(self, generator=generator)
def total_energy(self, **loader_kwargs):
"""
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return TotalEnergyResults object
else returns a pd.Series with a row for each AC type.
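        Examples
        --------
        Illustrative sketch, assuming `meter` is a loaded ElecMeter:
        >>> meter.total_energy()  # pd.Series, one entry per available AC type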
"""
nodes = [Clip, TotalEnergy]
return self._get_stat_from_cache_or_compute(
nodes, TotalEnergy.results_class(), loader_kwargs)
def dropout_rate(self, ignore_gaps=True, **loader_kwargs):
"""
Parameters
----------
ignore_gaps : bool, default=True
If True then will only calculate dropout rate for good sections.
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
DropoutRateResults object if `full_results` is True,
else float
"""
nodes = [DropoutRate]
if ignore_gaps:
loader_kwargs['sections'] = self.good_sections(**loader_kwargs)
return self._get_stat_from_cache_or_compute(
nodes, DropoutRate.results_class(), loader_kwargs)
def good_sections(self, **loader_kwargs):
"""
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return nilmtk.stats.GoodSectionsResults
object otherwise return list of TimeFrame objects.
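        Examples
        --------
        Illustrative sketch, assuming `meter` is a loaded ElecMeter:
        >>> sections = meter.good_sections()  # list of TimeFrame objects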
"""
loader_kwargs.setdefault('n_look_ahead_rows', 10)
nodes = [GoodSections]
results_obj = GoodSections.results_class(self.device['max_sample_period'])
return self._get_stat_from_cache_or_compute(
nodes, results_obj, loader_kwargs)
def _get_stat_from_cache_or_compute(self, nodes, results_obj, loader_kwargs):
"""General function for computing statistics and/or loading them from
cache.
Cached statistics lives in the DataStore at
'building<I>/elec/cache/meter<K>/<statistic_name>' e.g.
'building1/elec/cache/meter1/total_energy'. We store the
        'full' statistic... i.e. we store a representation of the `Results._data`
        DataFrame. Sometimes we need to do some conversion to store
`Results._data` on disk. The logic for doing this conversion lives
in the `Results` class or subclass. The cache can be cleared by calling
`ElecMeter.clear_cache()`.
Parameters
----------
nodes : list of nilmtk.Node classes
results_obj : instance of nilmtk.Results subclass
loader_kwargs : dict
Returns
-------
if `full_results` is True then return nilmtk.Results subclass
instance otherwise return nilmtk.Results.simple().
See Also
--------
clear_cache
_compute_stat
key_for_cached_stat
get_cached_stat
"""
full_results = loader_kwargs.pop('full_results', False)
verbose = loader_kwargs.get('verbose')
if 'ac_type' in loader_kwargs or 'physical_quantity' in loader_kwargs:
loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs)
cols = loader_kwargs.get('cols', [])
ac_types = set([m[1] for m in cols if m[1]])
results_obj_copy = deepcopy(results_obj)
# Prepare `sections` list
sections = loader_kwargs.get('sections')
if sections is None:
tf = self.get_timeframe()
tf.include_end = True
sections = [tf]
sections = TimeFrameGroup(sections)
sections = [s for s in sections if not s.empty]
# Retrieve usable stats from cache
key_for_cached_stat = self.key_for_cached_stat(results_obj.name)
if loader_kwargs.get('preprocessing') is None:
cached_stat = self.get_cached_stat(key_for_cached_stat)
results_obj.import_from_cache(cached_stat, sections)
def find_sections_to_compute():
# Get sections_to_compute
results_obj_timeframes = results_obj.timeframes()
sections_to_compute = set(sections) - set(results_obj_timeframes)
sections_to_compute = list(sections_to_compute)
sections_to_compute.sort()
return sections_to_compute
try:
ac_type_keys = results_obj.simple().keys()
except:
sections_to_compute = find_sections_to_compute()
else:
if ac_types.issubset(ac_type_keys):
sections_to_compute = find_sections_to_compute()
else:
sections_to_compute = sections
results_obj = results_obj_copy
else:
sections_to_compute = sections
if verbose and not results_obj._data.empty:
print("Using cached result.")
# If we get to here then we have to compute some stats
if sections_to_compute:
loader_kwargs['sections'] = sections_to_compute
computed_result = self._compute_stat(nodes, loader_kwargs)
# Merge cached results with newly computed
results_obj.update(computed_result.results)
# Save to disk newly computed stats
stat_for_store = computed_result.results.export_to_cache()
try:
self.store.append(key_for_cached_stat, stat_for_store)
except ValueError:
# the old table probably had different columns
self.store.remove(key_for_cached_stat)
self.store.put(key_for_cached_stat, results_obj.export_to_cache())
if full_results:
return results_obj
else:
res = results_obj.simple()
if ac_types:
try:
ac_type_keys = res.keys()
except:
return res
else:
return pd.Series(res[ac_types], index=ac_types)
else:
return res
def _compute_stat(self, nodes, loader_kwargs):
"""
Parameters
----------
nodes : list of nilmtk.Node subclass objects
loader_kwargs : dict
Returns
-------
Node subclass object
See Also
--------
clear_cache
_get_stat_from_cache_or_compute
key_for_cached_stat
get_cached_stat
"""
results = self.get_source_node(**loader_kwargs)
for node in nodes:
results = node(results)
results.run()
return results
def key_for_cached_stat(self, stat_name):
"""
Parameters
----------
stat_name : str
Returns
-------
key : str
See Also
--------
clear_cache
_compute_stat
_get_stat_from_cache_or_compute
get_cached_stat
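        Examples
        --------
        Illustrative sketch for a meter with instance 1 in building 1:
        >>> meter.key_for_cached_stat('total_energy')
        'building1/elec/cache/meter1/total_energy'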
"""
if isinstance(self.instance(), tuple):
meter_str = "_".join([str(i) for i in (self.instance())])
else:
meter_str = "{:d}".format(self.instance())
return ("building{:d}/elec/cache/meter{}/{:s}"
.format(self.building(), meter_str, stat_name))
def clear_cache(self, verbose=False):
"""
See Also
--------
_compute_stat
_get_stat_from_cache_or_compute
key_for_cached_stat
get_cached_stat
"""
if self.store is not None:
key_for_cache = self.key_for_cached_stat('')
try:
self.store.remove(key_for_cache)
except KeyError:
if verbose:
print("No existing cache for", key_for_cache)
else:
print("Removed", key_for_cache)
def get_cached_stat(self, key_for_stat):
"""
Parameters
----------
key_for_stat : str
Returns
-------
pd.DataFrame
See Also
--------
_compute_stat
_get_stat_from_cache_or_compute
key_for_cached_stat
clear_cache
"""
if self.store is None:
return pd.DataFrame()
try:
stat_from_cache = self.store[key_for_stat]
except KeyError:
return pd.DataFrame()
else:
return pd.DataFrame() if stat_from_cache is None else stat_from_cache
# def total_on_duration(self):
# """Return timedelta"""
# raise NotImplementedError
# def on_durations(self):
# raise NotImplementedError
# def activity_distribution(self, bin_size, timespan):
# raise NotImplementedError
# def on_off_events(self):
# use self.metadata.minimum_[off|on]_duration
# raise NotImplementedError
# def discrete_appliance_activations(self):
# """
# Return a Mask defining the start and end times of each appliance
# activation.
# """
# raise NotImplementedError
# def contiguous_sections(self):
# """retuns Mask object"""
# raise NotImplementedError
# def clean_and_export(self, destination_datastore):
# """Apply all cleaning configured in meter.cleaning and then export. Also identifies
# and records the locations of gaps. Also records metadata about exactly which
# cleaning steps have been executed and some summary results (e.g. the number of
# implausible values removed)"""
# raise NotImplementedError
| apache-2.0 |
fbagirov/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. However it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
ndhuang/python-lib | TimeStream.py | 1 | 5108 | import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
class TimeStream:
"""
Contains a pair of vectors: data points and the times they were collected
This class provides convenient access to a data time stream. It will
eventually provide support for various time standards (labview, matlab,
linux, windows?). Calling the object will return the vector of data
points.
"""
TIME_TYPES = ['matlab', 'labview', 'unix']
def __init__(self, values, t, timeType, mask = np.ma.nomask):
if (not isinstance(values, np.ma.MaskedArray)):
self.values = np.ma.array(values, mask = mask)
else:
self.values = values
timeType = timeType.lower()
        self._check_time_type(timeType)
if (timeType == 'matlab'):
# significant hackery here: matlab defines
# datenum('Jan-1-0000 00:00:00') = 1, whereas fromordinal
# considers 1/1/0001 to be 1.
            t = [dt.datetime.fromordinal(int(np.floor(t[i]))) - \
dt.timedelta(days = 366) + \
dt.timedelta(t[i] % 1) for i in range(len(t))]
elif (timeType == 'unix'):
t = [dt.datetime.fromtimestamp(t[i]) \
for i in range(len(t))]
elif (timeType == 'labview'):
delta = dt.datetime(1904, 1, 1, 0, 0, 0) - \
dt.datetime(1970, 1, 1, 0, 0, 0)
t += delta.total_seconds()
t = [dt.datetime.fromtimestamp(t[i]) \
for i in range(len(t))]
self.t = np.ma.array(t, mask = self.values.mask)
def derivative(self, gaps = True):
"""
Return the time derivative of the time stream.
In units of [value units]/second
"""
        self._check_mask()
        if (gaps):
            delta_val = np.diff(self.values.compressed())
            delta_t = np.diff(self.t.compressed())
        else:
            delta_val = np.diff(self.values).compressed()
            delta_t = np.diff(self.t).compressed()
for i in range(len(delta_t)):
delta_t[i] = delta_t[i].total_seconds()
return (delta_val / delta_t)
def integral(self, gaps = True):
"""
Return the time integral of the time stream.
In units of [value units] * second
"""
        self._check_mask()
        # Completion sketch (assumed; the original body stopped at `if (gaps):`):
        # trapezoidal rule over the unmasked samples. The `gaps` flag is kept in
        # the signature but is not yet honoured.
        vals = self.values.compressed()
        t = self.t.compressed()
        delta_t = np.array([d.total_seconds() for d in np.diff(t)])
        return np.sum(0.5 * (vals[1:] + vals[:-1]) * delta_t)
def get_contiguous(self, minsize = 0, maxgap = 0):
"""
return a list of arrays, each of which contains contiguous
samples
"""
def get_time(self):
'''
return the time as a vector of datetime objects
'''
return self.t
def get_unixtime(self):
'''
return a vector in the unix format (seconds since the eopch)
'''
epoch = dt.datetime(1970, 1, 1, 0, 0, 0, 0)
t = np.zeros(np.shape(self.t))
i = 0
for tmp in self.t:
delta = tmp - epoch
t[i] = delta.total_seconds()
i += 1
return t
def get_matlabtime(self):
'''
return a vector in the matlab format (days since 12/31/-0001)
'''
# more hackery here, see above
t = np.zeros(np.shape(self.t))
i = 0
for tmp in self.t:
            t[i] = tmp.toordinal() + 366 + tmp.hour / 24. + \
                   tmp.minute / (60. * 24) + tmp.second / (60. * 60 * 24) + \
                   tmp.microsecond / (1e6 * 60 * 60 * 24)
            i += 1
        return t
def get_labviewtime(self):
'''
return a vector in the laview format
(seconds since 1/1/1904 00:00:00)
'''
epoch = dt.datetime(1904, 1, 1, 0, 0, 0, 0)
t = np.zeros(np.shape(self.t))
i = 0
for tmp in self.t:
delta = tmp - epoch
t[i] = delta.total_seconds()
i += 1
return t
def remove(self, ind):
'''
remove data points given by ind
Parameters
----------
ind : array-like
an array of indices to remove
'''
for i in np.atleast_1d(ind):
            self.t[i] = np.ma.masked
            self.values[i] = np.ma.masked
self.t = self.t[~self.t.mask]
self.values = self.values[~self.values.mask]
def plot(self, ax = None, locator_args = None):
        if (ax is None):
            fig = plt.figure()
            ax = fig.add_subplot(111)
        if (locator_args is None):
            locator_args = {}
        ax.plot(self.t, self.values, '.')
        # set tickmarks for dates
        loc = mdates.AutoDateLocator(**locator_args)
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
def _check_time_type(self, timeType):
        if (timeType not in self.TIME_TYPES):
raise ValueError('Time format %s is invalid' %timeType)
def _check_mask(self):
raise ValueError('Masks do not match')
return np.any(self.values.mask ^ self.t.mask)
def __call__(self):
return self.values
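# Example usage (illustrative sketch added here, not part of the original
# module; the class statement itself is defined above this excerpt, so the
# name `TimeStream` below is a placeholder for whatever the class is called):
#
#   ts = TimeStream(values=np.arange(10.0), t=np.arange(10.0), timeType='unix')
#   ts()                  # masked array of data values
#   ts.get_unixtime()     # seconds since the Unix epoch
#   ts.derivative()       # [value units] per second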
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/HardContact_ElPPlShear/Area/A_1e-4/Normal_Stress_Plot.py | 72 | 2800 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig('Normal_Stress.pdf', bbox_inches='tight')
# plt.show()
| cc0-1.0 |
PyWavelets/pywt | doc/source/pyplots/plot_mallat_2d.py | 3 | 1471 | import numpy as np
import pywt
from matplotlib import pyplot as plt
from pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis
x = pywt.data.camera().astype(np.float32)
shape = x.shape
max_lev = 3 # how many levels of decomposition to draw
label_levels = 3 # how many levels to explicitly label on the plots
fig, axes = plt.subplots(2, 4, figsize=[14, 8])
for level in range(0, max_lev + 1):
if level == 0:
# show the original image before decomposition
axes[0, 0].set_axis_off()
axes[1, 0].imshow(x, cmap=plt.cm.gray)
axes[1, 0].set_title('Image')
axes[1, 0].set_axis_off()
continue
# plot subband boundaries of a standard DWT basis
draw_2d_wp_basis(shape, wavedec2_keys(level), ax=axes[0, level],
label_levels=label_levels)
axes[0, level].set_title('{} level\ndecomposition'.format(level))
# compute the 2D DWT
c = pywt.wavedec2(x, 'db2', mode='periodization', level=level)
# normalize each coefficient array independently for better visibility
c[0] /= np.abs(c[0]).max()
for detail_level in range(level):
c[detail_level + 1] = [d/np.abs(d).max() for d in c[detail_level + 1]]
# show the normalized coefficients
arr, slices = pywt.coeffs_to_array(c)
axes[1, level].imshow(arr, cmap=plt.cm.gray)
axes[1, level].set_title('Coefficients\n({} level)'.format(level))
axes[1, level].set_axis_off()
plt.tight_layout()
plt.show()
| mit |
winklerand/pandas | pandas/core/config_init.py | 1 | 16281 | """
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. if register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.
"""
import pandas.core.config as cf
from pandas.core.config import (is_int, is_bool, is_text, is_instance_factory,
is_one_of_factory, get_default_val,
is_callable)
from pandas.io.formats.console import detect_console_encoding
# compute
use_bottleneck_doc = """
: bool
Use the bottleneck library to accelerate if it is installed,
the default is True
Valid values: False,True
"""
def use_bottleneck_cb(key):
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
use_numexpr_doc = """
: bool
Use the numexpr library to accelerate computation if it is installed,
the default is True
Valid values: False,True
"""
def use_numexpr_cb(key):
from pandas.core.computation import expressions
expressions.set_use_numexpr(cf.get_option(key))
with cf.config_prefix('compute'):
cf.register_option('use_bottleneck', True, use_bottleneck_doc,
validator=is_bool, cb=use_bottleneck_cb)
cf.register_option('use_numexpr', True, use_numexpr_doc,
validator=is_bool, cb=use_numexpr_cb)
#
# options from the "display" namespace
pc_precision_doc = """
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc = """
: int
Default space for DataFrame columns.
"""
pc_max_rows_doc = """
: int
If max_rows is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the height of the terminal and print a truncated object which fits
the screen height. The IPython notebook, IPython qtconsole, or
IDLE do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_cols_doc = """
: int
If max_cols is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the width of the terminal and print a truncated object which fits
the screen width. The IPython notebook, IPython qtconsole, or IDLE
do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_categories_doc = """
: int
This sets the maximum number of categories pandas should output when
printing out a `Categorical` or a Series of dtype "category".
"""
pc_max_info_cols_doc = """
: int
max_info_columns is used in DataFrame.info method to decide if
per column information will be printed.
"""
pc_nb_repr_h_doc = """
: boolean
When True, IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_date_dayfirst_doc = """
: boolean
When True, prints and parses dates with the day first, eg 20/01/2005
"""
pc_date_yearfirst_doc = """
: boolean
When True, prints and parses dates with the year first, eg 2005/01/20
"""
pc_pprint_nest_depth = """
: int
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc = """
: boolean
"sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
pc_encoding_doc = """
: str/unicode
Defaults to the detected encoding of the console.
Specifies the encoding to be used for strings returned by to_string,
these are generally strings meant to be displayed on the console.
"""
float_format_doc = """
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See formats.format.EngFormatter for an example.
"""
max_colwidth_doc = """
: int
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output.
"""
colheader_justify_doc = """
: 'left'/'right'
Controls the justification of column headers. used by DataFrameFormatter.
"""
pc_expand_repr_doc = """
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
: boolean or 'truncate'
Whether to print out dimensions at the end of DataFrame repr.
If 'truncate' is specified, only print out the dimensions if the
frame is truncated (e.g. not display all rows and/or columns)
"""
pc_line_width_doc = """
: int
Deprecated.
"""
pc_east_asian_width_doc = """
: boolean
Whether to use the Unicode East Asian Width to calculate the display text
width.
    Enabling this may affect performance (default: False)
"""
pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belonging to Ambiguous as Wide (width=2)
(default: False)
"""
pc_latex_repr_doc = """
: boolean
Whether to produce a latex DataFrame representation for jupyter
environments that support it.
(default: False)
"""
pc_table_schema_doc = """
: boolean
Whether to publish a Table Schema representation for frontends
that support it.
(default: False)
"""
pc_html_border_doc = """
: int
A ``border=value`` attribute is inserted in the ``<table>`` tag
for the DataFrame HTML repr.
"""
pc_html_border_deprecation_warning = """\
html.border has been deprecated, use display.html.border instead
(currently both are identical)
"""
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
a terminal this can be set to None and pandas will correctly auto-detect
the width.
Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
terminal and hence it is not possible to correctly detect the width.
"""
pc_height_doc = """
: int
Deprecated.
"""
pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
will be displayed as exactly 0 by repr and friends.
"""
pc_max_seq_items = """
: int or None
    when pretty-printing a long sequence, no more than `max_seq_items`
will be printed. If items are omitted, they will be denoted by the
addition of "..." to the resulting string.
If set to None, the number of items to be printed is unlimited.
"""
pc_max_info_rows_doc = """
: int or None
df.info() will usually show null-counts for each column.
For large frames this can be quite slow. max_info_rows and max_info_cols
limit this null check only to frames with smaller dimensions than
specified.
"""
pc_large_repr_doc = """
: 'truncate'/'info'
For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
show a truncated table (the default from 0.13), or switch to the view from
df.info() (the behaviour in earlier versions of pandas).
"""
pc_memory_usage_doc = """
: bool, string or None
This specifies if the memory usage of a DataFrame should be displayed when
df.info() is called. Valid values True,False,'deep'
"""
pc_latex_escape = """
: bool
    This specifies if the to_latex method of a Dataframe escapes special
    characters.
Valid values: False,True
"""
pc_latex_longtable = """
:bool
This specifies if the to_latex method of a Dataframe uses the longtable
format.
Valid values: False,True
"""
pc_latex_multicolumn = """
: bool
This specifies if the to_latex method of a Dataframe uses multicolumns
to pretty-print MultiIndex columns.
Valid values: False,True
"""
pc_latex_multicolumn_format = """
: string
This specifies the format for multicolumn headers.
Can be surrounded with '|'.
Valid values: 'l', 'c', 'r', 'p{<width>}'
"""
pc_latex_multirow = """
: bool
This specifies if the to_latex method of a Dataframe uses multirows
to pretty-print MultiIndex rows.
Valid values: False,True
"""
style_backup = dict()
def table_schema_cb(key):
from pandas.io.formats.printing import _enable_data_resource_formatter
_enable_data_resource_formatter(cf.get_option(key))
with cf.config_prefix('display'):
cf.register_option('precision', 6, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc,
validator=is_one_of_factory([None, is_callable]))
    cf.register_option('column_space', 12, pc_colspace_doc, validator=is_int)
cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,
validator=is_instance_factory((int, type(None))))
cf.register_option('max_rows', 60, pc_max_rows_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('max_categories', 8, pc_max_categories_doc,
validator=is_int)
cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
cf.register_option('max_columns', 20, pc_max_cols_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('large_repr', 'truncate', pc_large_repr_doc,
validator=is_one_of_factory(['truncate', 'info']))
cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,
validator=is_int)
cf.register_option('colheader_justify', 'right', colheader_justify_doc,
validator=is_text)
cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
validator=is_bool)
cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
validator=is_bool)
cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
validator=is_bool)
cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
validator=is_int)
cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
validator=is_bool)
cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc,
validator=is_one_of_factory([True, False, 'truncate']))
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('height', 60, pc_height_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('width', 80, pc_width_doc,
validator=is_instance_factory([type(None), int]))
# redirected to width, make defval identical
cf.register_option('line_width', get_default_val('display.width'),
pc_line_width_doc)
cf.register_option('memory_usage', True, pc_memory_usage_doc,
validator=is_one_of_factory([None, True,
False, 'deep']))
cf.register_option('unicode.east_asian_width', False,
pc_east_asian_width_doc, validator=is_bool)
    cf.register_option('unicode.ambiguous_as_wide', False,
                       pc_ambiguous_as_wide_doc, validator=is_bool)
cf.register_option('latex.repr', False,
pc_latex_repr_doc, validator=is_bool)
cf.register_option('latex.escape', True, pc_latex_escape,
validator=is_bool)
cf.register_option('latex.longtable', False, pc_latex_longtable,
validator=is_bool)
cf.register_option('latex.multicolumn', True, pc_latex_multicolumn,
validator=is_bool)
    cf.register_option('latex.multicolumn_format', 'l',
                       pc_latex_multicolumn_format, validator=is_text)
cf.register_option('latex.multirow', False, pc_latex_multirow,
validator=is_bool)
cf.register_option('html.table_schema', False, pc_table_schema_doc,
validator=is_bool, cb=table_schema_cb)
cf.register_option('html.border', 1, pc_html_border_doc,
validator=is_int)
with cf.config_prefix('html'):
cf.register_option('border', 1, pc_html_border_doc,
validator=is_int)
cf.deprecate_option('html.border', msg=pc_html_border_deprecation_warning,
rkey='display.html.border')
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
"""
with cf.config_prefix('mode'):
cf.register_option('sim_interactive', False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
: boolean
    use_inf_as_null has been deprecated and will be removed in a future
version. Use `use_inf_as_na` instead.
"""
use_inf_as_na_doc = """
: boolean
True means treat None, NaN, INF, -INF as NA (old way),
False means None and NaN are null, but INF, -INF are not NA
(new way).
"""
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
def use_inf_as_na_cb(key):
from pandas.core.dtypes.missing import _use_inf_as_na
_use_inf_as_na(key)
with cf.config_prefix('mode'):
cf.register_option('use_inf_as_na', False, use_inf_as_na_doc,
cb=use_inf_as_na_cb)
cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,
cb=use_inf_as_na_cb)
cf.deprecate_option('mode.use_inf_as_null', msg=use_inf_as_null_doc,
rkey='mode.use_inf_as_na')
# user warnings
chained_assignment = """
: string
    Raise an exception, warn, or take no action if trying to use chained
    assignment. The default is warn.
"""
with cf.config_prefix('mode'):
cf.register_option('chained_assignment', 'warn', chained_assignment,
validator=is_one_of_factory([None, 'warn', 'raise']))
# Set up the io.excel specific configuration.
writer_engine_doc = """
: string
The default Excel writer engine for '{ext}' files. Available options:
auto, {others}.
"""
_xls_options = ['xlwt']
_xlsm_options = ['openpyxl']
_xlsx_options = ['openpyxl', 'xlsxwriter']
with cf.config_prefix("io.excel.xls"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xls',
others=', '.join(_xls_options)),
validator=str)
with cf.config_prefix("io.excel.xlsm"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsm',
others=', '.join(_xlsm_options)),
validator=str)
with cf.config_prefix("io.excel.xlsx"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsx',
others=', '.join(_xlsx_options)),
validator=str)
# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
The default parquet reader/writer engine. Available options:
'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""
with cf.config_prefix('io.parquet'):
cf.register_option(
'engine', 'auto', parquet_engine_doc,
validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet']))
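# Usage note (illustrative comment, not part of this module): once pandas is
# imported, the options registered above are read and written through the
# public helpers, e.g.
#
#   import pandas as pd
#   pd.get_option('display.max_rows')          # -> 60 (default set above)
#   pd.set_option('display.max_rows', 100)
#   pd.describe_option('io.parquet.engine')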
| bsd-3-clause |
zhangmianhongni/MyPractice | Python/MachineLearning/ud120-projects-master/naive_bayes/nb_author_id.py | 1 | 1071 | #!/usr/bin/python
"""
This is the code to accompany the Lesson 1 (Naive Bayes) mini-project.
Use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn.naive_bayes import GaussianNB
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
clf = GaussianNB()
t0 = time()
clf.fit(features_train, labels_train)
print 'training time', round(time() - t0, 3), 's'
t0 = time()
print clf.score(features_test, labels_test)
print 'predicting time', round(time() - t0, 3),'s'
pred = clf.predict(features_test)
print pred
#########################################################
### your code goes here ###
#########################################################
| apache-2.0 |
dssg/givinggraph | givinggraph/util/text2tfidf.py | 3 | 1772 | ''' Convert text into tf-idf.
usage: python data_loader.py TEXT_FILE
File should have one document per row. E.g. this file:
apple banana
banana cherry
results in
1:0.579739 0:0.814802
2:0.814802 1:0.579739
with a vocabulary of
[(u'apple', 0), (u'banana', 1), (u'cherry', 2)]
'''
import io
import operator
import sys
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
def text2tfidf(data_generator):
"""Transform text data into tf-idf vectors. This can be used in a
streaming fashion, so that each line is represented sparsely as its read.
data_generator .... generator of strings. This can simply be a list of
strings, or a file object.
>>> vocab, data = text2tfidf(['apple banana', 'banana cherry'])
>>> data = data.toarray()
>>> print data[0]
[ 0.81480247 0.57973867 0. ]
>>> print data[1]
[ 0. 0.57973867 0.81480247]
>>> print sorted(vocab.iteritems(), key=operator.itemgetter(1))
[(u'apple', 0), (u'banana', 1), (u'cherry', 2)]
"""
counter = CountVectorizer(min_df=0.)
data = counter.fit_transform(data_generator)
tfidf = TfidfTransformer()
data = tfidf.fit_transform(data)
return counter.vocabulary_, data
def print_sparse_matrix(data):
"""Print a sparse matrix in format <index>:<value>"""
for ri in range(data.get_shape()[0]):
row = data.getrow(ri)
print ' '.join(['%d:%g' % (i, d)
for (i, d) in zip(row.indices, row.data)])
if (__name__ == '__main__'):
    if len(sys.argv) == 1:
        print 'python data_loader.py TEXT_FILE'
        sys.exit(1)
    vocab, data = text2tfidf(io.open(sys.argv[1], mode='rt', encoding='utf8'))
print_sparse_matrix(data)
| mit |
heli522/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
arjoly/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
    # Assert that the first 10 explained variance ratios agree
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
silky/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics-phase.py | 22 | 1986 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(pX1.size)/float(N), pX1, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -2, 8])
plt.title('pX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(pX2.size)/float(N), pX2, 'c', lw=1.5)
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,min(pX2), 25])
plt.title('pX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX3.size)/float(N), pX3, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,2, 24])
plt.title('pX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics-phase.png')
plt.show()
| agpl-3.0 |
ajrichards/bayesian-examples | hypothesis-testing/binomial_prob.py | 2 | 1308 | #!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
"""
import matplotlib.pyplot as plt
import numpy as np
## simulate the number of flips before heads
def coin():
tails, num_flips = True, 0
while tails:
num_flips += 1
if np.random.binomial(1,0.5):
tails = False
return num_flips
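# The docstring above suggests summing the first 100 terms of the series to
# check the closed form. A minimal sketch (added here, not in the original
# script): the partial sums of sum_{n>=1} (2n - 1) / 2**n converge to 3,
# the fair price of the game.
def expected_winnings(n_terms=100):
    """Partial sum of (2n - 1) / 2**n for n = 1..n_terms (approaches 3)."""
    return sum((2.0 * n - 1.0) / 2.0 ** n for n in range(1, n_terms + 1))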
if __name__ == '__main__':
## simulate
flips = [coin() for k in xrange(10000)]
## get the distribution of counts condition on the number of flips
range_flips = range(1, max(flips) + 1)
counts = np.array([flips.count(k)*1. for k in range_flips])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(range_flips,counts,alpha=0.4)
ax.set_ylabel("counts")
ax.set_xlabel("num flips to win")
#print [int(i) for i in counts]
winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
#print range_flips
print winnings
plt.show()
| bsd-3-clause |
tmhm/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
shruthiag96/ns3-dev-vns | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
mfouesneau/pyphot | pyphot/sandbox.py | 1 | 55913 | """
Sandbox of new developments
Use at your own risks
Photometric package using Astropy Units
=======================================
Defines a Filter class and associated functions to extract photometry.
This also include functions to keep libraries up to date
.. note::
integrations are done using :func:`trapz`
    Why not Simpson's rule? Simpson's rule fits a quadratic through each
    sequence of 3 points. When filters have sharp edges, the error due to
    this interpolation is much larger than the uncertainty induced by
    trapezoidal integration.
"""
from __future__ import print_function, division
import os
from functools import wraps
import numpy as np
import tables
from scipy.integrate import trapz
from .simpletable import SimpleTable
from .vega import Vega
from .config import libsdir
from .licks import reduce_resolution as _reduce_resolution
from .licks import LickIndex, LickLibrary
# directories
# __default__ = libsdir + '/filters.hd5'
# __default__ = libsdir + '/filters'
__default__ = libsdir + '/new_filters.hd5'
__default_lick__ = libsdir + '/licks.dat'
from .ezunits import unit as Unit
class Constants(object):
""" A namespace for constants """
# Planck's constant in erg * sec
h = 6.626075540e-27 * Unit('erg * s')
# Speed of light in cm/s
c = Unit('c').to('AA/s')
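# Note (comment added for clarity): `Constants.h` and `Constants.c` carry
# units, so expressions such as `Constants.c.to('AA/s').value` (used in
# `get_Nphotons` below) yield plain floats in the requested unit.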
def hasUnit(val):
""" Check is an object has units """
return hasattr(val, 'unit') or hasattr(val, 'units')
class set_method_default_units(object):
""" Decorator for classmethods that makes sure that
the inputs of slamb, sflux are in given units
expects the decorated method to be defined as
>> def methodname(self, lamb, flux)
"""
def __init__(self, wavelength_unit, flux_unit, output_unit=None):
self.wavelength_unit = Unit(wavelength_unit)
self.flux_unit = Unit(flux_unit)
self.output_unit = output_unit
@classmethod
def force_units(cls, value, unit):
if unit is None:
return value
try:
return value.to(unit)
except AttributeError:
            msg = 'Warning: assuming {0:s} units for a unitless object.'
print(msg.format(str(unit)))
return value * unit
def __call__(self, func):
@wraps(func)
def wrapper(filter_, slamb, sflux, *args, **kwargs):
_slamb = set_method_default_units.force_units(slamb,
self.wavelength_unit)
_sflux = set_method_default_units.force_units(sflux,
self.flux_unit)
output = func(filter_, _slamb, _sflux, *args, **kwargs)
return set_method_default_units.force_units(output,
self.output_unit)
return wrapper
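# Usage note (comment added for clarity): this decorator wraps Filter-like
# methods further down in this module, e.g.
#
#   @set_method_default_units('AA', 'flam',
#                             output_unit='erg*s**-1*cm**-2*AA**-1')
#   def get_flux(self, slamb, sflux, axis=-1):
#       ...
#
# so that `slamb` and `sflux` are coerced to Angstroem and flam before the
# wrapped method runs, and the result is converted to the requested unit.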
def _drop_units(q):
""" Drop the unit definition silently """
try:
return q.value
except AttributeError:
try:
return q.magnitude
except AttributeError:
return q
class UnitFilter(object):
""" Evolution of Filter that makes sure the input spectra and output fluxes
have units to avoid mis-interpretation.
Note the usual (non SI) units of flux definitions:
flam = erg/s/cm**2/AA
fnu = erg/s/cm**2/Hz
photflam = photon/s/cm**2/AA
photnu = photon/s/cm**2/Hz
Define a filter by its name, wavelength and transmission
The type of detector (energy or photon counter) can be specified for
adapting calculations. (default: photon)
Attributes
----------
name: str
name of the filter
cl: float
central wavelength of the filter
norm: float
normalization factor of the filter
lpivot: float
pivot wavelength of the filter
wavelength: ndarray
wavelength sequence defining the filter transmission curve
transmit: ndarray
transmission curve of the filter
dtype: str
detector type, either "photon" or "energy" counter
unit: str
wavelength units
"""
def __init__(self, wavelength, transmit, name='', dtype="photon",
unit=None):
"""Constructor"""
self.name = name
self.set_dtype(dtype)
try: # get units from the inputs
self._wavelength = wavelength.value
unit = str(wavelength.unit)
except AttributeError:
self._wavelength = wavelength
self.set_wavelength_unit(unit)
# make sure input data are ordered and cleaned of weird values.
idx = np.argsort(self._wavelength)
self._wavelength = self._wavelength[idx]
self.transmit = np.clip(transmit[idx], 0., np.nanmax(transmit))
self.norm = trapz(self.transmit, self._wavelength)
self._lT = trapz(self._wavelength * self.transmit, self._wavelength)
self._lpivot = self._calculate_lpivot()
if self.norm > 0:
self._cl = self._lT / self.norm
else:
self._cl = 0.
def _calculate_lpivot(self):
if self.transmit.max() <= 0:
return 0.
if 'photon' in self.dtype:
lpivot2 = self._lT / trapz(self.transmit / self._wavelength,
self._wavelength)
else:
lpivot2 = self.norm / trapz(self.transmit / self._wavelength ** 2,
self._wavelength)
return np.sqrt(lpivot2)
def set_wavelength_unit(self, unit):
""" Set the wavelength units """
try: # get units from the inputs
self.wavelength_unit = str(self._wavelength.unit)
except AttributeError:
self.wavelength_unit = unit
def set_dtype(self, dtype):
""" Set the detector type (photon or energy)"""
_d = dtype.lower()
if "phot" in _d:
self.dtype = "photon"
elif "ener" in _d:
self.dtype = "energy"
else:
raise ValueError('Unknown detector type {0}'.format(dtype))
def info(self, show_zeropoints=True):
""" display information about the current filter"""
msg = """Filter object information:
name: {s.name:s}
detector type: {s.dtype:s}
wavelength units: {s.wavelength_unit}
central wavelength: {s.cl:f}
pivot wavelength: {s.lpivot:f}
effective wavelength: {s.leff:f}
photon wavelength: {s.lphot:f}
minimum wavelength: {s.lmin:f}
maximum wavelength: {s.lmax:f}
norm: {s.norm:f}
effective width: {s.width:f}
fullwidth half-max: {s.fwhm:f}
definition contains {s.transmit.size:d} points"""
print(msg.format(s=self).replace('None', 'unknown'))
# zero points only if units
if (self.wavelength_unit is None) or (not show_zeropoints):
return
print("""
Zeropoints
Vega: {s.Vega_zero_mag:f} mag,
{s.Vega_zero_flux},
{s.Vega_zero_Jy}
{s.Vega_zero_photons}
AB: {s.AB_zero_mag:f} mag,
{s.AB_zero_flux},
{s.AB_zero_Jy}
ST: {s.ST_zero_mag:f} mag,
{s.ST_zero_flux},
{s.ST_zero_Jy}
""".format(s=self))
def __repr__(self):
return "Filter: {0:s}, {1:s}".format(self.name, object.__repr__(self))
@property
def wavelength(self):
""" Unitwise wavelength definition """
if self.wavelength_unit is not None:
return self._wavelength * Unit(self.wavelength_unit)
else:
return self._wavelength
@property
def lmax(self):
""" Calculated as the last value with a transmission at least 1% of
maximum transmission """
cond = (self.transmit / self.transmit.max()) > 1./100
return max(self.wavelength[cond])
@property
def lmin(self):
""" Calculate das the first value with a transmission at least 1% of
maximum transmission """
cond = (self.transmit / self.transmit.max()) > 1./100
return min(self.wavelength[cond])
@property
def width(self):
""" Effective width
Equivalent to the horizontal size of a rectangle with height equal
to maximum transmission and with the same area that the one covered by
the filter transmission curve.
W = int(T dlamb) / max(T)
"""
return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)
@property
def fwhm(self):
""" the difference between the two wavelengths for which filter
transmission is half maximum
        .. note::
This calculation is not exact but rounded to the nearest passband
data points
"""
vals = self.transmit / self.transmit.max() - 0.5
zero_crossings = np.where(np.diff(np.sign(vals)))[0]
lambs = self.wavelength[zero_crossings]
return np.diff(lambs)[0]
@property
def lpivot(self):
""" Unitwise wavelength definition """
if self.wavelength_unit is not None:
return self._lpivot * Unit(self.wavelength_unit)
else:
return self._lpivot
@property
def cl(self):
""" Unitwise wavelength definition """
if self.wavelength_unit is not None:
return self._cl * Unit(self.wavelength_unit)
else:
return self._cl
@property
def leff(self):
""" Unitwise Effective wavelength
leff = int (lamb * T * Vega dlamb) / int(T * Vega dlamb)
"""
with Vega() as v:
s = self.reinterp(v.wavelength)
w = s._wavelength
if s.transmit.max() > 0:
leff = np.trapz(w * s.transmit * v.flux.value, w, axis=-1)
leff /= np.trapz(s.transmit * v.flux.value, w, axis=-1)
else:
leff = float('nan')
if s.wavelength_unit is not None:
leff = leff * Unit(s.wavelength_unit)
if self.wavelength_unit is not None:
return leff.to(self.wavelength_unit)
return leff
else:
return leff
@classmethod
def _validate_sflux(cls, slamb, sflux):
""" clean data for inf in input """
_sflux = _drop_units(sflux)
_slamb = _drop_units(slamb)
if True in np.isinf(sflux):
indinf = np.where(np.isinf(_sflux))
indfin = np.where(np.isfinite(_sflux))
_sflux[indinf] = np.interp(_slamb[indinf], _slamb[indfin],
_sflux[indfin], left=0, right=0)
try:
_unit = str(sflux.unit)
return _sflux * Unit(_unit)
except AttributeError:
return _sflux
@classmethod
def _get_zero_like(cls, sflux, axis=-1):
"""return a zero value corresponding to a flux calculation on sflux"""
# _sflux = _drop_units(sflux)
# shape = _sflux.shape
# if axis < 0:
# axis = len(shape) + axis
# newshape = shape[:axis] + shape[axis + 1:]
# return np.zeros(newshape, _sflux.dtype)
return np.zeros_like(sflux).sum(axis=axis)
@property
def lphot(self):
""" Photon distribution based effective wavelength. Defined as
lphot = int(lamb ** 2 * T * Vega dlamb) / int(lamb * T * Vega dlamb)
which we calculate as
lphot = get_flux(lamb * vega) / get_flux(vega)
"""
if self.wavelength_unit is None:
raise AttributeError('Needs wavelength units')
with Vega() as v:
wave = v.wavelength.value
# Cheating units to avoid making a new filter
f_vega = self.get_flux(v.wavelength, v.flux, axis=-1)
f_lamb_vega = self.get_flux(v.wavelength, wave * v.flux, axis=-1)
f_lamb2_vega = self.get_flux(v.wavelength, wave ** 2 * v.flux,
axis=-1)
if 'photon' in self.dtype:
lphot = (f_lamb_vega / f_vega)
else:
lphot = f_lamb2_vega / f_lamb_vega
return (lphot * Unit(str(v.wavelength.unit))).to(self.wavelength_unit)
def _get_filter_in_units_of(self, slamb=None):
w = self.wavelength
if hasUnit(slamb) & hasUnit(w):
return w.to(str(slamb.unit)).value
else:
print("Warning: assuming units are consistent")
return self._wavelength
@set_method_default_units('AA', 'flam',
output_unit='photon*s**-1*cm**-2*AA**-1')
def get_Nphotons(self, slamb, sflux, axis=-1):
"""getNphot the number of photons through the filter
(Ntot / width in the documentation)
getflux() * leff / hc
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux in erg/s/cm2/AA
Returns
-------
N: float
Number of photons of the spectrum within the filter
"""
passb = self.reinterp(slamb)
wave = passb._wavelength
dlambda = np.diff(wave)
# h = 6.626075540e-27 # erg * s
# c = 2.99792458e18 # AA / s
h = Constants.h.to('erg * s').value
c = Constants.c.to('AA/s').value
vals = sflux.value * wave * passb.transmit
vals[~np.isfinite(vals)] = 0.
Nphot = 0.5 * np.sum((vals[1:] + vals[:-1]) * dlambda) / (h * c)
Nphot = Nphot * Unit('photon*s**-1*cm**-2')
return Nphot / passb.width # photons / cm2 / s / A
@property
def Vega_zero_photons(self):
""" Vega number of photons per wavelength unit
.. note::
see `self.get_Nphotons`
"""
with Vega() as v:
return self.get_Nphotons(v.wavelength, v.flux)
@set_method_default_units('AA', 'flam',
output_unit='erg*s**-1*cm**-2*AA**-1')
def get_flux(self, slamb, sflux, axis=-1):
"""getFlux
Integrate the flux within the filter and return the integrated energy
If you consider applying the filter to many spectra, you might want to
consider extractSEDs.
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux
Returns
-------
flux: float
Energy of the spectrum within the filter
"""
passb = self.reinterp(slamb)
ifT = passb.transmit
_slamb = _drop_units(slamb)
_sflux = _drop_units(passb._validate_sflux(slamb, sflux))
_w_unit = str(slamb.unit)
_f_unit = str(sflux.unit)
# if the filter is null on that wavelength range flux is then 0
# ind = ifT > 0.
nonzero = np.where(ifT > 0)[0]
if nonzero.size <= 0:
return passb._get_zero_like(sflux)
# avoid calculating many zeros
nonzero_start = max(0, min(nonzero) - 5)
nonzero_end = min(len(ifT), max(nonzero) + 5)
ind = np.zeros(len(ifT), dtype=bool)
ind[nonzero_start:nonzero_end] = True
if True in ind:
try:
_sflux = _sflux[:, ind]
except Exception:
_sflux = _sflux[ind]
# limit integrals to where necessary
if 'photon' in passb.dtype:
a = np.trapz(_slamb[ind] * ifT[ind] * _sflux, _slamb[ind],
axis=axis)
b = np.trapz(_slamb[ind] * ifT[ind], _slamb[ind])
a = a * Unit('*'.join((_w_unit, _f_unit, _w_unit)))
b = b * Unit('*'.join((_w_unit, _w_unit)))
elif 'energy' in passb.dtype:
a = np.trapz(ifT[ind] * _sflux, _slamb[ind], axis=axis)
b = np.trapz(ifT[ind], _slamb[ind])
a = a * Unit('*'.join((_f_unit, _w_unit)))
b = b * Unit(_w_unit)
if (np.isinf(a.value).any() | np.isinf(b.value).any()):
print(self.name, "Warn for inf value")
return a / b
else:
return passb._get_zero_like(_sflux)
def getFlux(self, slamb, sflux, axis=-1):
"""
Integrate the flux within the filter and return the integrated energy
If you consider applying the filter to many spectra, you might want to
consider extractSEDs.
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux
Returns
-------
flux: float
Energy of the spectrum within the filter
"""
return self.get_flux(slamb, sflux, axis=axis)
def reinterp(self, lamb):
""" reinterpolate filter onto a different wavelength definition """
_wavelength = self._get_filter_in_units_of(lamb)
_lamb = _drop_units(lamb)
try:
_unit = str(lamb.unit)
except Exception:
_unit = self.wavelength_unit
ifT = np.interp(_lamb, _wavelength, self.transmit, left=0., right=0.)
return self.__class__(_lamb, ifT, name=self.name, dtype=self.dtype,
unit=_unit)
def __call__(self, slamb, sflux):
return self.applyTo(slamb, sflux)
def apply_transmission(self, slamb, sflux):
"""
Apply filter transmission to a spectrum (with reinterpolation of the
filter)
Parameters
----------
slamb: ndarray
spectrum wavelength definition domain
sflux: ndarray
associated flux
Returns
-------
flux: float
new spectrum values accounting for the filter
"""
_wavelength = self._get_filter_in_units_of(slamb)
_lamb = _drop_units(slamb)
ifT = np.interp(_lamb, _wavelength, self.transmit, left=0., right=0.)
return ifT * sflux
def applyTo(self, slamb, sflux):
""" For compatibility but bad name """
return self.apply_transmission(slamb, sflux)
@classmethod
def from_ascii(cls, fname, dtype='csv', **kwargs):
""" Load filter from ascii file """
lamb = kwargs.pop('lamb', None)
name = kwargs.pop('name', None)
detector = kwargs.pop('detector', 'photon')
unit = kwargs.pop('unit', None)
t = SimpleTable(fname, dtype=dtype, **kwargs)
w = t['WAVELENGTH'].astype(float)
r = t['THROUGHPUT'].astype(float)
# update properties from file header
detector = t.header.get('DETECTOR', detector)
unit = t.header.get('WAVELENGTH_UNIT', unit)
name = t.header.get('NAME', name)
# try from the comments in the header first
if name in (None, 'None', 'none', ''):
name = [k.split()[1]
for k in t.header.get('COMMENT', '').split('\n')
if 'COMPNAME' in k]
name = ''.join(name).replace('"', '').replace("'", '')
# if that did not work try the table header directly
if name in (None, 'None', 'none', ''):
name = t.header['NAME']
_filter = UnitFilter(w, r, name=name, dtype=detector, unit=unit)
# reinterpolate if requested
if lamb is not None:
_filter = _filter.reinterp(lamb)
return _filter
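    # Expected ascii layout (taken from the parsing above): a table with
    # WAVELENGTH and THROUGHPUT columns, optionally declaring DETECTOR,
    # WAVELENGTH_UNIT and NAME in its header. Illustrative call with a
    # hypothetical file:
    #   filt = UnitFilter.from_ascii('my_band.csv', unit='AA')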
def write_to(self, fname, **kwargs):
""" Export filter to a file
Parameters
----------
fname: str
filename
Uses `SimpleTable.write` parameters
"""
data = self.to_Table()
data.write(fname, **kwargs)
def to_Table(self, **kwargs):
""" Export filter to a SimpleTable object
        Uses `SimpleTable` parameters
"""
data = SimpleTable({'WAVELENGTH': self._wavelength,
'THROUGHPUT': self.transmit})
if self.wavelength_unit is not None:
data.header['WAVELENGTH_UNIT'] = self.wavelength_unit
data.header['DETECTOR'] = self.dtype
data.header['COMPNAME'] = str(self.name)
data.header['NAME'] = str(self.name)
data.set_comment('THROUGHPUT', 'filter throughput definition')
data.set_comment('WAVELENGTH', 'filter wavelength definition')
data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')
return data
def to_dict(self):
""" Return a dictionary of the filter """
data = {'WAVELENGTH': self._wavelength, 'THROUGHPUT': self.transmit}
if self.wavelength_unit is not None:
data['WAVELENGTH_UNIT'] = self.wavelength_unit
data['DETECTOR'] = self.dtype
data['NAME'] = self.name
data['PIVOT'] = self._lpivot
data['CENTRAL'] = self._cl
data['EFFECTIVE'] = _drop_units(self.leff)
data['NORM'] = self.norm
return data
@classmethod
def make_integration_filter(cls, lmin, lmax, name='', dtype='photon',
unit=None):
""" Generate an heavyside filter between lmin and lmax """
dyn = lmax - lmin
try:
unit = str(dyn.unit)
dyn = _drop_units(dyn)
except Exception:
pass
w = np.array([lmin - 0.01 * dyn, lmin, lmax, lmax + 0.01 * dyn])
f = np.array([0., 1., 1., 0.])
return UnitFilter(w, f, name=name, dtype=dtype, unit=unit)
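    # Illustrative sketch (hypothetical band edges): an ideal box passband
    # spanning 4000-5000 AA, e.g.
    #   box = UnitFilter.make_integration_filter(4000., 5000., name='box',
    #                                            unit='AA')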
@property
def AB_zero_mag(self):
""" AB magnitude zero point
ABmag = -2.5 * log10(f_nu) - 48.60
= -2.5 * log10(f_lamb) - 2.5 * log10(lpivot ** 2 / c) - 48.60
= -2.5 * log10(f_lamb) - zpts
"""
if self.wavelength_unit is None:
raise AttributeError('Needs wavelength units')
C1 = (Unit(self.wavelength_unit).to('AA') ** 2 /
Constants.c.to('AA/s').value)
c1 = self._lpivot ** 2 * C1
m = 2.5 * np.log10(_drop_units(c1)) + 48.6
return m
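    # Worked example: a passband with lpivot ~ 5500 AA gives
    #   m = 2.5 * log10(5500 ** 2 / 2.998e18) + 48.6 ~ 21.1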
@property
def AB_zero_flux(self):
""" AB flux zero point in erg/s/cm2/AA """
return 10 ** (-0.4 * self.AB_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')
@property
def AB_zero_Jy(self):
""" AB flux zero point in Jansky (Jy) """
c = 1e-8 * Constants.c.to('m/s').value
f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.AB_zero_flux.value
return f * Unit('Jy')
@property
def Vega_zero_mag(self):
""" vega magnitude zero point
vegamag = -2.5 * log10(f_lamb) + 2.5 * log10(f_vega)
vegamag = -2.5 * log10(f_lamb) - zpts
"""
flux = self.Vega_zero_flux.value
if flux > 0:
return -2.5 * np.log10(flux)
else:
return float('nan')
@property
def Vega_zero_flux(self):
""" Vega flux zero point in erg/s/cm2/AA """
with Vega() as v:
f_vega = self.get_flux(v.wavelength, v.flux, axis=-1)
return f_vega
@property
def Vega_zero_Jy(self):
""" Vega flux zero point in Jansky (Jy) """
c = 1e-8 * Constants.c.to('m/s').value
f = 1e5 / c * (self.lpivot.to('AA').value ** 2 *
self.Vega_zero_flux.to('erg*s**-1*cm**-2*AA**-1').value)
return f * Unit('Jy')
@property
def ST_zero_mag(self):
""" ST magnitude zero point
STmag = -2.5 * log10(f_lamb) -21.1
"""
return 21.1
@property
def ST_zero_flux(self):
""" ST flux zero point in erg/s/cm2/AA """
return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')
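    # e.g. 10 ** (-0.4 * 21.1) ~ 3.63e-9 erg/s/cm2/AA, the usual ST
    # zero-point flux density.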
@property
def ST_zero_Jy(self):
""" ST flux zero point in Jansky (Jy) """
c = 1e-8 * Constants.c.to('m/s').value
f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.ST_zero_flux.value
return f * Unit('Jy')
class UncertainFilter(UnitFilter):
""" What could be a filter with uncertainties
Attributes
----------
wavelength: ndarray
wavelength sequence defining the filter transmission curve
mean_: Filter
mean passband transmission
samples_: sequence(Filter)
samples from the uncertain passband transmission model
name: string
name of the passband
dtype: str
detector type, either "photon" or "energy" counter
unit: str
wavelength units
"""
def __init__(self, wavelength, mean_transmit, samples,
name='', dtype='photon', unit=None):
""" Constructor """
self.mean_ = UnitFilter(wavelength, mean_transmit,
name=name, dtype=dtype, unit=unit)
self.samples_ = [UnitFilter(wavelength, transmit_k,
name=name + '_{0:d}'.format(num),
dtype=dtype, unit=unit)
for (num, transmit_k) in enumerate(samples)]
self.name = name
self.dtype = self.mean_.dtype
self.model_ = None
@classmethod
def from_gp_model(cls, model, xprime=None, n_samples=10, **kwargs):
""" Generate a filter object from a sklearn GP model
Parameters
----------
model: sklearn.gaussian_process.GaussianProcessRegressor
model of the passband
xprime: ndarray
wavelength to express the model in addition to the training points
n_samples: int
number of samples to generate from the model.
        **kwargs: dict
UncertainFilter keywords
"""
if xprime is None:
xpred = model.X_train_
else:
xpred = np.unique(np.hstack([_drop_units(xprime),
model.X_train_.ravel()]))
xpred = xpred.reshape(1, -1).T
        unit_ = kwargs.pop('unit', None)
        if unit_ is None:
            xprime_unit = getattr(xprime, 'units', None)
            if xprime_unit is not None:
                unit_ = str(xprime_unit)
mean_transmit, _ = model.predict(xpred, return_std=True)
samples = model.sample_y(xpred, n_samples=n_samples)
unc_filter = cls(xpred.ravel(),
mean_transmit,
samples.T, unit=unit_, **kwargs)
unc_filter.model_ = model
return unc_filter
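    # Illustrative sketch (hypothetical data): wrap a fitted sklearn GP, e.g.
    #   from sklearn.gaussian_process import GaussianProcessRegressor
    #   gp = GaussianProcessRegressor().fit(wave[:, None], transmission)
    #   filt = UncertainFilter.from_gp_model(gp, n_samples=20, name='my_band')
    # where `wave` and `transmission` are 1-d measurement arrays.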
def info(self, show_zeropoints=True):
""" display information about the current filter"""
string = self.mean_.info(show_zeropoints)
string = string.replace('Filter object information',
'Filter object mean information only')
return string
def set_dtype(self, dtype):
""" Set the detector type (photon or energy)"""
self.mean_.set_dtype(dtype)
for filter_k in self.samples_:
filter_k.set_dtype(dtype)
self.dtype = self.mean_.dtype
def set_wavelength_unit(self, unit):
""" Set the wavelength units """
self.mean_.set_wavelength_unit(unit)
for filter_k in self.samples_:
filter_k.set_wavelength_unit(unit)
@property
def wavelength(self):
""" Unitwise wavelength definition """
return self.mean_.wavelength
@property
def wavelength_unit(self):
""" Unit wavelength definition """
return self.mean_.wavelength_unit
@property
def _wavelength(self):
""" Unitless wavelength definition """
return self.mean_._wavelength
@property
def transmit(self):
""" Transmission curves """
return self._get_mean_and_samples_attribute('transmit')
def _get_samples_attribute(self, attr, *args, **kwargs):
""" Returns the attribute from all samples """
try:
vals = [getattr(fk, attr)(*args, **kwargs) for fk in self.samples_]
except TypeError:
vals = [getattr(fk, attr) for fk in self.samples_]
try:
unit_ = Unit(str(vals[0].unit))
return np.array([v.value for v in vals]) * unit_
except AttributeError:
return np.array(vals)
def _get_mean_attribute(self, attr, *args, **kwargs):
""" Returns the attribute from the mean passband """
attr = getattr(self.mean_, attr)
try:
return attr(*args, **kwargs)
except TypeError:
return attr
def _get_mean_and_samples_attribute(self, attr, *args, **kwargs):
""" Compute / extract mean and smapled filter attributes
Parameters
----------
attr: str
attribute to get (can be a callable attribute)
args: sequence
any argument of attr
kwargs: dict
any keywords for attr
Returns
-------
mean_: object
value from the mean passband
samples_: sequence(object)
values from each sampled passband
"""
return (self._get_mean_attribute(attr, *args, **kwargs),
self._get_samples_attribute(attr, *args, **kwargs))
@property
def lmax(self):
""" Calculated as the last value with a transmission at least 1% of
maximum transmission """
return self._get_mean_and_samples_attribute('lmax')
@property
def lmin(self):
""" Calculate das the first value with a transmission at least 1% of
maximum transmission """
return self._get_mean_and_samples_attribute('lmin')
@property
def width(self):
""" Effective width
Equivalent to the horizontal size of a rectangle with height equal
        to maximum transmission and with the same area as the one covered by
the filter transmission curve.
W = int(T dlamb) / max(T)
"""
return self._get_mean_and_samples_attribute('width')
@property
def fwhm(self):
""" the difference between the two wavelengths for which filter
transmission is half maximum
        .. note::
This calculation is not exact but rounded to the nearest passband
data points
"""
return self._get_mean_and_samples_attribute('fwhm')
@property
def lpivot(self):
""" Unitwise wavelength definition """
return self._get_mean_and_samples_attribute('lpivot')
@property
def cl(self):
""" Unitwise wavelength definition """
return self._get_mean_and_samples_attribute('cl')
@property
def leff(self):
""" Unitwise Effective wavelength
leff = int (lamb * T * Vega dlamb) / int(T * Vega dlamb)
"""
return self._get_mean_and_samples_attribute('leff')
@property
def lphot(self):
""" Photon distribution based effective wavelength. Defined as
lphot = int(lamb ** 2 * T * Vega dlamb) / int(lamb * T * Vega dlamb)
which we calculate as
lphot = get_flux(lamb * vega) / get_flux(vega)
"""
return self._get_mean_and_samples_attribute('lphot')
def get_Nphotons(self, slamb, sflux, axis=-1):
"""getNphot the number of photons through the filter
(Ntot / width in the documentation)
getflux() * leff / hc
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux in erg/s/cm2/AA
Returns
-------
N: float
Number of photons of the spectrum within the filter
"""
mean, samples = self._get_mean_and_samples_attribute('get_Nphotons',
slamb, sflux,
axis=axis)
return mean, samples
@property
def Vega_zero_photons(self):
""" Vega number of photons per wavelength unit
.. note::
see `self.get_Nphotons`
"""
return self._get_mean_and_samples_attribute('Vega_zero_photons')
def getFlux(self, slamb, sflux, axis=-1):
"""getFlux
Integrate the flux within the filter and return the integrated energy
        If you plan to apply the filter to many spectra, you might want to
        use extractSEDs.
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux
Returns
-------
flux: float
Energy of the spectrum within the filter
"""
mean, samples = self._get_mean_and_samples_attribute('getFlux',
slamb, sflux,
axis=axis)
return mean, samples
def reinterp(self, lamb):
""" reinterpolate filter onto a different wavelength definition """
mean, samples = self._get_mean_and_samples_attribute('reinterp')
mean_val = mean(lamb)
samp_val = [sk(mean_val.wavelength) for sk in samples]
samp_transmissions = [sk.transmit for sk in samp_val]
return self.__class__(mean_val.wavelength, mean_val.transmit,
samp_transmissions, name=self.name,
dtype=mean_val.dtype,
unit=mean_val.wavelength_unit)
def apply_transmission(self, slamb, sflux):
"""
Apply filter transmission to a spectrum
(with reinterpolation of the filter)
Parameters
----------
slamb: ndarray
spectrum wavelength definition domain
sflux: ndarray
associated flux
Returns
-------
flux: float
new spectrum values accounting for the filter
"""
mean, samples = self._get_mean_and_samples_attribute('apply_transmission')
mean_val = mean(slamb, sflux)
samp_val = [sk(slamb, sflux) for sk in samples]
return mean_val, samp_val
@property
def AB_zero_mag(self):
""" AB magnitude zero point
ABmag = -2.5 * log10(f_nu) - 48.60
= -2.5 * log10(f_lamb) - 2.5 * log10(lpivot ** 2 / c) - 48.60
= -2.5 * log10(f_lamb) - zpts
"""
return self._get_mean_and_samples_attribute('AB_zero_mag')
@property
def AB_zero_flux(self):
""" AB flux zero point in erg/s/cm2/AA """
return self._get_mean_and_samples_attribute('AB_zero_flux')
@property
def AB_zero_Jy(self):
""" AB flux zero point in Jansky (Jy) """
return self._get_mean_and_samples_attribute('AB_zero_Jy')
@property
def Vega_zero_mag(self):
""" Vega magnitude zero point
Vegamag = -2.5 * log10(f_lamb) + 2.5 * log10(f_vega)
Vegamag = -2.5 * log10(f_lamb) - zpts
"""
return self._get_mean_and_samples_attribute('Vega_zero_mag')
@property
def Vega_zero_flux(self):
""" Vega flux zero point in erg/s/cm2/AA """
return self._get_mean_and_samples_attribute('Vega_zero_flux')
@property
def Vega_zero_Jy(self):
""" Vega flux zero point in Jansky (Jy) """
return self._get_mean_and_samples_attribute('Vega_zero_Jy')
@property
def ST_zero_mag(self):
""" ST magnitude zero point
STmag = -2.5 * log10(f_lamb) -21.1
"""
return 21.1
@property
def ST_zero_flux(self):
""" ST flux zero point in erg/s/cm2/AA """
        return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')
@property
def ST_zero_Jy(self):
""" ST flux zero point in Jansky (Jy) """
return self._get_mean_and_samples_attribute('ST_zero_Jy')
def to_Table(self, **kwargs):
""" Export filter to a SimpleTable object
        Uses `SimpleTable` parameters
"""
mean_transmit, transmit_ = self.transmit
data_ = {'WAVELENGTH': self._wavelength,
'THROUGHPUT': mean_transmit}
for num, filterk in enumerate(transmit_, 1):
data_['THROUGHPUT_{0:d}'.format(num)] = filterk
data = SimpleTable(data_)
if self.wavelength_unit is not None:
data.header['WAVELENGTH_UNIT'] = self.wavelength_unit
data.header['DETECTOR'] = self.dtype
data.header['COMPNAME'] = self.name
data.header['NAME'] = self.name
data.set_comment('THROUGHPUT', 'filter throughput definition')
data.set_comment('WAVELENGTH', 'filter wavelength definition')
for num in range(1, len(transmit_) + 1):
data.set_comment('THROUGHPUT_{0:d}'.format(num),
'filter throughput sample')
data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')
return data
@classmethod
def from_ascii(cls, fname, dtype='csv', **kwargs):
""" Load filter from ascii file """
lamb = kwargs.pop('lamb', None)
name = kwargs.pop('name', None)
detector = kwargs.pop('detector', 'photon')
unit_ = kwargs.pop('unit', None)
if not isinstance(fname, SimpleTable):
t = SimpleTable(fname, dtype=dtype, **kwargs)
else:
t = fname
w = t['WAVELENGTH'].astype(float)
r = t['THROUGHPUT'].astype(float)
keys = [k for k in t.keys() if 'THROUGHPUT_' in k]
# update properties from file header
detector = t.header.get('DETECTOR', detector)
unit_ = t.header.get('WAVELENGTH_UNIT', unit_)
# try from the comments in the header first
if name in (None, 'None', 'none', ''):
name = [k.split()[1]
for k in t.header.get('COMMENT', '').split('\n')
if 'COMPNAME' in k]
name = ''.join(name).replace('"', '').replace("'", '')
# if that did not work try the table header directly
if name in (None, 'None', 'none', ''):
name = t.header['NAME']
if len(keys) > 0:
samp = np.array([t[key] for key in keys])
_filter = cls(w, r, samp, name=name, dtype=detector, unit=unit_)
else:
_filter = UnitFilter(w, r, name=name, dtype=detector, unit=unit_)
# reinterpolate if requested
if lamb is not None:
_filter = _filter.reinterp(lamb)
return _filter
class UnitLibrary(object):
""" Common grounds for filter libraries """
def __init__(self, source=__default__, *args, **kwargs):
""" Construct the library """
self.source = None
def __repr__(self):
msg = "Filter Library: {0}\n{1:s}"
return msg.format(self.source, object.__repr__(self))
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *exc_info):
""" end context """
return False
def __len__(self):
""" Size of the library """
return len(self.content)
def to_csv(self, directory='./', progress=True, **kwargs):
""" Export each filter into a csv file with its own name
Parameters
----------
directory: str
directory to write into
progress: bool
show progress if set
"""
from .helpers import progress_enumerate
try:
os.stat(directory)
except Exception:
os.mkdir(directory)
with self as s:
for _, k in progress_enumerate(s.content, desc='export',
show_progress=progress):
f = s[k]
if f.wavelength_unit is None:
f.wavelength_unit = 'AA'
f.write_to("{0:s}/{1:s}.csv".format(directory, f.name).lower(),
fmt="%.6f", **kwargs)
def to_hdf(self, fname='filters.hd5', progress=True, **kwargs):
""" Export each filter into a csv file with its own name
Parameters
----------
directory: str
directory to write into
progress: bool
show progress if set
"""
from .helpers import progress_enumerate
with self as s:
for _, k in progress_enumerate(s.content, desc='export',
show_progress=progress):
f = s[k]
if f.wavelength_unit is None:
f.wavelength_unit = 'AA'
f.write_to("{0:s}".format(fname),
tablename='/filters/{0}'.format(f.name),
createparents=True, append=True, silent=True,
**kwargs)
@classmethod
def from_hd5(cls, filename, **kwargs):
return UnitHDF_Library(filename, **kwargs)
@classmethod
def from_ascii(cls, filename, **kwargs):
return UnitAscii_Library(filename, **kwargs)
@property
def content(self):
""" Get the content list """
return self.get_library_content()
def __getitem__(self, name):
""" Make this object like a dictionary and load one or multiple filters
"""
with self as s:
try:
f = s._load_filter(name)
except TypeError:
f = [s._load_filter(k) for k in name]
return f
def _load_filter(self, *args, **kwargs):
""" Load a given filter from the library """
raise NotImplementedError
def get_library_content(self):
""" get the content of the library """
raise NotImplementedError
def load_all_filters(self, interp=True, lamb=None):
""" load all filters from the library """
raise NotImplementedError
def add_filter(self, f):
""" add a filter to the library """
raise NotImplementedError
def find(self, name, case_sensitive=True):
r = []
        if not case_sensitive:
_n = name.lower()
for k in self.get_library_content():
if _n in k.lower():
r.append(k)
else:
for k in self.content:
if name in k:
r.append(k)
return r
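    # e.g. lib.find('SDSS') returns every library key containing 'SDSS';
    # pass case_sensitive=False to match regardless of case.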
class UnitAscii_Library(UnitLibrary):
""" Interface one or multiple directory or many files as a filter library
>>> lib = Ascii_Library(['ground', 'hst', 'myfilter.csv'])
"""
def __init__(self, source):
self.source = source
def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):
""" Load a given filter from the library """
try:
fil = UnitFilter.from_ascii(fname, *args, **kwargs)
except Exception:
content = self.content
r = [k for k in content if fname in k]
if len(r) <= 0: # try all lower for filenames (ascii convention)
r = [k for k in content if fname.lower() in k]
if len(r) > 1:
print("auto correction found multiple choices")
print(r)
raise ValueError('Refine name to one of {0}'.format(r))
elif len(r) <= 0:
raise ValueError('Cannot find filter {0}'.format(fname))
else:
fil = UnitFilter.from_ascii(r[0], *args, **kwargs)
if (interp is True) and (lamb is not None):
return fil.reinterp(lamb)
else:
return fil
def get_library_content(self):
""" get the content of the library """
from glob import glob
try:
os.path.isdir(self.source)
lst = glob(self.source + '/*')
except TypeError:
lst = self.source
dircheck = True
while dircheck is True:
dircheck = False
newlst = []
for entry in lst:
if os.path.isdir(entry):
newlst.extend(glob(entry + '/*'))
dircheck = True
else:
newlst.append(entry)
lst = newlst
return lst
def load_all_filters(self, interp=True, lamb=None):
""" load all filters from the library """
return [self._load_filter(k, interp=interp, lamb=lamb)
for k in self.content]
def load_filters(self, names, interp=True, lamb=None, filterLib=None):
""" load a limited set of filters
Parameters
----------
names: list[str]
normalized names according to filtersLib
interp: bool
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
filterLib: path
path to the filter library hd5 file
Returns
-------
filters: list[filter]
list of filter objects
"""
filters = [self._load_filter(fname, interp=interp, lamb=lamb)
for fname in names]
return(filters)
def add_filters(self, filter_object, fmt="%.6f", **kwargs):
""" Add a filter to the library permanently
Parameters
----------
filter_object: Filter object
filter to add
"""
if not isinstance(filter_object, UnitFilter):
msg = "Argument of type Filter expected. Got type {0}"
raise TypeError(msg.format(type(filter_object)))
if filter_object.wavelength_unit is None:
msg = "Filter wavelength must have units for storage."
raise AttributeError(msg)
fname = "{0:s}/{1:s}.csv".format(self.source, filter_object.name)
filter_object.write_to(fname.lower(),
fmt=fmt, **kwargs)
class UnitHDF_Library(UnitLibrary):
""" Storage based on HDF """
def __init__(self, source=__default__, mode='r'):
self.source = source
self.hdf = None
self.mode = mode
def __enter__(self):
""" Enter context """
if self.hdf is None:
self.hdf = tables.open_file(self.source, self.mode)
return self
def __exit__(self, *exc_info):
""" end context """
if self.hdf is not None:
self.hdf.close()
self.hdf = None
return False
def _load_filter(self, fname, interp=True, lamb=None):
""" Load a given filter from the library
Parameters
----------
fname: str
normalized names according to filtersLib
interp: bool, optional
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
integrationFilter: bool, optional
            set True for special integration filters such as Qion or E_uv
if set, lamb should be given
Returns
-------
filter: Filter instance
filter object
"""
ftab = self.hdf
if hasattr(fname, 'decode'):
fnode = ftab.get_node('/filters/' + fname.decode('utf8'))
else:
fnode = ftab.get_node('/filters/' + fname)
flamb = fnode[:]['WAVELENGTH']
transmit = fnode[:]['THROUGHPUT']
dtype = 'photon'
unit = None
attrs = fnode.attrs
if 'DETECTOR' in attrs:
dtype = attrs['DETECTOR']
if 'WAVELENGTH_UNIT' in attrs:
unit = attrs['WAVELENGTH_UNIT']
fil = UnitFilter(flamb, transmit, name=fnode.name,
dtype=dtype, unit=unit)
        if interp and (lamb is not None):
fil = fil.reinterp(lamb)
return fil
def get_library_content(self):
""" get the content of the library """
with self as s:
try:
filters = s.hdf.root.content.cols.TABLENAME[:]
except Exception:
filters = list(s.hdf.root.filters._v_children.keys())
if hasattr(filters[0], 'decode'):
filters = [k.decode('utf8') for k in filters]
return(filters)
def load_all_filters(self, interp=True, lamb=None):
""" load all filters from the library
Parameters
----------
interp: bool
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
Returns
-------
filters: list[filter]
list of filter objects
"""
with self as s:
filters = [s._load_filter(fname, interp=interp, lamb=lamb)
for fname in s.content]
return(filters)
def load_filters(self, names, interp=True, lamb=None, filterLib=None):
""" load a limited set of filters
Parameters
----------
names: list[str]
normalized names according to filtersLib
interp: bool
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
filterLib: path
path to the filter library hd5 file
Returns
-------
filters: list[filter]
list of filter objects
"""
with self as s:
filters = [s._load_filter(fname, interp=interp, lamb=lamb)
for fname in names]
return(filters)
def add_filter(self, f, **kwargs):
""" Add a filter to the library permanently
Parameters
----------
f: Filter object
filter to add
"""
if not isinstance(f, UnitFilter):
msg = "Argument of type Filter expected. Got type {0}"
raise TypeError(msg.format(type(f)))
if f.wavelength_unit is None:
msg = "Filter wavelength must have units for storage."
raise AttributeError(msg)
f.write_to("{0:s}".format(self.source),
tablename='/filters/{0}'.format(f.name),
createparents=True,
**kwargs)
def get_library(fname=__default__, **kwargs):
""" Finds the appropriate class to load the library """
if os.path.isfile(fname):
return UnitHDF_Library(fname, **kwargs)
else:
return UnitAscii_Library(fname, **kwargs)
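# Illustrative usage sketch (hypothetical paths): the backend is picked from fname,
#   lib = get_library('filters.hd5')     # existing file -> UnitHDF_Library
#   lib = get_library('my_filter_dir')   # anything else -> UnitAscii_Library
#   print(lib.content)                   # list the available passbands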
@set_method_default_units('AA', 'flam', output_unit='flam')
def reduce_resolution(wi, fi, fwhm0=0.55 * Unit('AA'),
sigma_floor=0.2 * Unit('AA')):
""" Adapt the resolution of the spectra to match the lick definitions
    Lick definitions have different resolution elements as a function
    of wavelength. These definitions are hard-coded in this function
    Parameters
    ----------
wi: ndarray (n, )
wavelength definition
fi: ndarray (nspec, n) or (n, )
spectra to convert
fwhm0: float
initial broadening in the spectra `fi`
sigma_floor: float
minimal dispersion to consider
Returns
-------
flux_red: ndarray (nspec, n) or (n, )
reduced spectra
"""
flux_red = _reduce_resolution(wi.value, fi.value,
fwhm0.to('AA').value,
sigma_floor.to('AA').value)
return flux_red * Unit('flam')
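# Illustrative sketch (hypothetical spectrum arrays): broaden a spectrum given
# in AA / flam down to the Lick resolution, e.g.
#   smoothed = reduce_resolution(wave * Unit('AA'), flux * Unit('flam'))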
class UnitLickIndex(LickIndex):
""" Define a Lick Index similarily to a Filter object """
@set_method_default_units('AA', 'flam')
def get(self, wave, flux, **kwargs):
""" compute spectral index after continuum subtraction
Parameters
----------
w: ndarray (nw, )
array of wavelengths in AA
flux: ndarray (N, nw)
array of flux values for different spectra in the series
degree: int (default 1)
degree of the polynomial fit to the continuum
nocheck: bool
set to silently pass on spectral domain mismatch.
otherwise raises an error when index is not covered
Returns
-------
ew: ndarray (N,)
equivalent width or magnitude array
Raises
------
ValueError: when the spectral coverage wave does not cover the index
range
"""
return LickIndex.get(self, wave, flux.to('flam').value, **kwargs)
class UnitLickLibrary(LickLibrary):
""" Collection of Lick indices """
def __init__(self, fname=__default_lick__, comment='#'):
self.source = fname
data, hdr = self._read_lick_list(fname, comment)
self._content = data
self._hdr = hdr
@property
def description(self):
""" any comment in the input file """
return self._hdr
@classmethod
    def _read_lick_list(cls, fname=__default_lick__, comment='#'):
""" read the list of lick indices
Parameters
----------
fname: str
file containing the indices' definitions
comment: str
character indicating comment in the file
Returns
-------
data: dict
dictionary of indices
name: (band, blue, red, unit)
"""
with open(fname, 'r') as f:
data = {}
hdr = []
for line in f:
if line[0] != comment:
_line = line.split()
attr = dict(
band=(float(_line[1]), float(_line[2])),
blue=(float(_line[3]), float(_line[4])),
red=(float(_line[5]), float(_line[6])),
unit='mag' if int(_line[7]) > 0 else 'ew',
)
name = _line[8]
data[name] = attr
else:
hdr.append(line[1:-1])
return data, hdr
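    # Expected layout of the lick list (derived from the parsing above): each
    # non-comment line holds at least 9 whitespace-separated columns -- a
    # leading column that is ignored here, band min/max, blue min/max,
    # red min/max, a 0/1 magnitude flag and the index name.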
def __repr__(self):
return "Lick Index Library: {0}\n{1:s}".format(self.source,
object.__repr__(self))
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *exc_info):
""" end context """
return False
def __len__(self):
""" Size of the library """
return len(self.content)
def get_library_content(self):
return list(self._content.keys())
def __getitem__(self, name):
""" Make this object like a dictionary and load one or multiple filters
"""
with self as s:
try:
f = s._load_filter(name)
except TypeError:
f = [s._load_filter(k) for k in name]
return f
def _load_filter(self, fname, **kwargs):
""" Load a given filter from the library """
with self as current_lib:
return UnitLickIndex(fname, current_lib._content[fname])
@property
def content(self):
return self.get_library_content()
def find(self, name, case_sensitive=True):
r = []
if not case_sensitive:
_n = name.lower()
for k in self.get_library_content():
if _n in k.lower():
r.append(k)
else:
for k in self.content:
if name in k:
r.append(k)
return r
| mit |
vasudevk/sklearn_pycon2015 | notebooks/fig_code/sgd_separator.py | 54 | 1148 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
def plot_sgd_separator():
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2,
random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01,
n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
ax = plt.axes()
ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
ax.axis('tight')
if __name__ == '__main__':
plot_sgd_separator()
plt.show()
| bsd-3-clause |
yask123/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
markomanninen/gematria | gematria/html.py | 1 | 3182 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: html.py
from IPython.display import HTML
import pandas as pd
from remarkuple import helper as h, table
from math import digital_root, digital_sum
from main import to_roman, to_hebrew, gematria, unicode_gematria
def _init_text(text, capitalize = None):
return text.decode('utf-8')
def char_table_data(text, modulo = 9):
# initialize data dictionary, split text to columns: #, letter, translit, num, sum, mod, word
data = dict([key, []] for key in ['letter', 'transliteration', 'gematria', 'word'])
# character chart
# split words
for word in _init_text(text).split():
# split letters
for idx, letter in enumerate(word):
#data['index'].append(idx)
data['letter'].append(letter)
data['transliteration'].append(to_roman(letter.encode('utf-8')))
data['gematria'].append(gematria(letter.encode('utf-8')))
data['word'].append(word)
data = pd.DataFrame(data)
# word summary from character chart
gb = data.groupby('word')
data2 = gb.sum()
data2['characters'] = gb['word'].apply(len)
data2['digital_sum'] = data2['gematria'].apply(digital_sum)
data2['digital_root'] = data2['gematria'].apply(digital_root)
# phrase summary from word summary
s = data2.sum()
data3 = pd.DataFrame({'digital_root': [digital_root(s.gematria)],
'characters': [s.characters],
'digital_sum': [digital_sum(s.gematria)],
'gematria': [s.gematria],
'phrase': text})
return (data, data2, data3)
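# Illustrative usage sketch (hypothetical Hebrew phrase): returns per-letter,
# per-word and whole-phrase summaries as pandas objects, e.g.
#   chars, words, phrase = char_table_data('שלום עליכם')
#   print(phrase['gematria'])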
def char_table(text, modulo = 9):
# initialize html table
tbl = table(Class="char-table")
# add caption / table title
tbl.addCaption(text)
# add data rows
tr1 = h.tr() # hebrew letters
tr2 = h.tr() # roman letters
tr3 = h.tr() # gematria number
tr4 = h.tr() # summary
i = 0
text = unicode(text, encoding="utf-8")
for word in text.split():
if i > 0:
# add empty cells for word separation
tr1 += h.th(" ")
tr1 += h.th(" ")
tr2 += h.td()
tr2 += h.td(Class="empty-cell")
tr3 += h.td()
tr3 += h.td(Class="empty-cell")
tr4 += h.td()
tr4 += h.td()
num = unicode_gematria(word)
tr4 += h.td("%s %s" % (num, h.sub(digital_root(num))), colspan=len(word))
i = i+1
# add each letter on own cell
for letter in word:
tr1 += h.th(letter.encode('utf-8'))
tr2 += h.td(to_roman(letter.encode('utf-8')))
tr3 += h.td(unicode_gematria(letter))
# add rows to table
tbl.addHeadRow(tr1)
tbl.addBodyRow(tr2)
tbl.addBodyRow(tr3)
tbl.addFootRow(tr4)
# add summary footer for table
num = unicode_gematria(text)
tbl.addFootRow(h.tr(h.td("%s %s" % (num, h.sub(digital_sum(num), " / ", digital_root(num, modulo))),
colspan=len(text)+len(text.split()),
style="border-top: solid 1px #ddd")))
    return tbl
| mit |
TzivakiM/LocationFactorsCode | deposition.py | 1 | 30850 | # deposited nuclides
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import csv
from cycler import cycler
#Plot settings for black and white. If color is wanted, comment out!
#monochrome = (cycler('color', ['k']) * cycler('marker', ['', '.']) *
# cycler('linestyle', ['-', '--', ':', '-.']))
#plt.rc('axes', prop_cycle=monochrome)
mark_step_num=0.1
def main(nuclide,T,timestep,graphoption,totdosetimeoption,doseratetimeoption):
#def main(nuclide,T,timestep,graphoption):
#define variables:
#the conversion factors from Petoussi-Henss for radionuclides detected as ground contamination after the Fukushima accident
#[adult, child, child4-7, infant]
e_dotCs137 = [10950., 11738.4, 11738.4, 14804.4]
e_dotCs134 = [30309.,32412., 32412., 40558.8]
t_halfCs137 = 30.1871 #from NIST
t_halfCs134 = 2.0654 #from NIST
#this is the same numbers used in USNCEAR 2013, based on measurements in europe after chernobyl
T1 = 1.5
T2 = 50
p1 = 0.5
p2 = 0.5
e_dot = [e_dotCs137,e_dotCs134]
t_half = [t_halfCs137, t_halfCs134]
nuclidename = ['Cs-137','Cs-134']
#Sets the parameters for the plotting
filetype = 'png'
if totdosetimeoption[0] == 1:
totdoseplottype = 'log'
elif totdosetimeoption[0] == 0:
totdoseplottype = 'linear'
else:
print 'Something went wrong with the total dose plot option'
if doseratetimeoption[0] == 1:
doserateplottype = 'linear'
elif doseratetimeoption[0] == 0:
doserateplottype = 'log'
else:
print 'Something went wrong with the dose rate plot option'
for l in range(len(nuclide)):
if nuclide[l]==1:
#timestep in years: the interval at which the calculations will be performed
numberT = int(T/timestep)
#The number of timesteps for the first 5 years of life
numberT5years = int(5./timestep)
#The number of timesteps for 10 years
numberT10years = int(10./timestep)
#the number of timesteps for the first 15 years of life
numberT15years = int(15./timestep)
def e_dep(t):
e_dep = e_dot[l][0] * ( (p1/((math.log(2)/t_half[l])+(math.log(2)/T1)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T1)))) + (p2/((math.log(2)/t_half[l])+(math.log(2)/T2)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T2)))))
return e_dep
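            # e_dep(t) above is the closed-form integral from 0 to t of
            # e_dot * [p1*exp(-t'*(ln2/t_half + ln2/T1)) + p2*exp(-t'*(ln2/t_half + ln2/T2))],
            # i.e. radioactive decay combined with a fast (T1) and a slow (T2)
            # environmental clearance component.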
IntegratedDose = e_dep(T)
            #The following functions are helpers for further calculations: copies of e_dep evaluated with the child and infant dose-rate coefficients.
def e_depChild(t):
e_dep = e_dot[l][1] * ( (p1/((math.log(2)/t_half[l])+(math.log(2)/T1)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T1)))) + (p2/((math.log(2)/t_half[l])+(math.log(2)/T2)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T2)))))
return e_dep
IntegratedDoseChild = e_depChild(T)
#next block to be taken out for paper
def e_depChild4(t):
e_dep = e_dot[l][2] * ( (p1/((math.log(2)/t_half[l])+(math.log(2)/T1)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T1)))) + (p2/((math.log(2)/t_half[l])+(math.log(2)/T2)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T2)))))
return e_dep
IntegratedDoseChild4 = e_depChild4(T)
def e_depInfant(t):
e_dep = e_dot[l][3] * ( (p1/((math.log(2)/t_half[l])+(math.log(2)/T1)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T1)))) + (p2/((math.log(2)/t_half[l])+(math.log(2)/T2)))* (1-math.exp(-t*((math.log(2)/t_half[l])+(math.log(2)/T2)))))
return e_dep
IntegratedDoseInfant = e_depInfant(T)
#variables for f_build [wood, woodFireproof, concrete]
a1 = [0.2, 0.1, 0.05]
a2 = [0.2, 0.1, 0.05]
T_build = [1.8, 1.8, 1.8]
#occupancy factors [new, out,in,child10,child1]
OFhard = [0.1, 0.2, 0.05, 0.05, 0.1]
OFdirt = [0.1, 0.1, 0.05, 0.1, 0.1]
OFbuild = [0.8, 0.7, 0.9, 0.85, 0.8]
#define variables for lists of location factors
time = []
f_hard = []
f_dirt = []
f_build = [[],[],[]]
#simplified location factor TODO: need to be able to be defined!
f_newOUT = 1.
f_newIN = 0.1
#define dose lists
eDepInt = []
eDepIntChild = []
eDepIntChild4 = []
eDepIntInfant = []
eDepStep = []
eDepStepChild = []
eDepStepChild4 = []
eDepStepInfant = []
#calculate the development of location factors over time and the stepwise dose: calculation of dose at each time and then subtraction from dose at previous timestep
for i in range(numberT+1):
#location factor equations hard surface and unpaved surface:
time.append(i*timestep)
f_hard.append(0.5 * math.exp(-(time[-1]*math.log(2)/0.9))+0.1)
f_dirt.append(0.5 * math.exp(-(time[-1]*math.log(2)/2.2))+0.25)
for j in range(len(f_build)):
#[wood, woodFireproof, concrete]
f_build[j].append(a1[j] * math.exp(-time[-1]*math.log(2)/T_build[j])+a2[j])
#integrated dose (that means for every time it is the sum from all previous doses). this is the equation given in UNSCEAR for e_dep
eDepInt.append(e_dep(time[-1]))
eDepIntChild.append(e_depChild(time[-1]))
eDepIntChild4.append(e_depChild4(time[-1]))
eDepIntInfant.append(e_depInfant(time[-1]))
#Dose in every timestep: Subtract dose from previous time from the current integrated dose value
if time[-1] == 0.0:
eDepStep.append(eDepInt[-1])
eDepStepChild.append(eDepIntChild[-1])
eDepStepChild4.append(eDepIntChild4[-1])
eDepStepInfant.append(eDepIntInfant[-1])
else:
eDepStep.append(eDepInt[-1] - eDepInt[-2])
eDepStepChild.append(eDepIntChild[-1] - eDepIntChild[-2])
eDepStepChild4.append(eDepIntChild4[-1] - eDepIntChild4[-2])
eDepStepInfant.append(eDepIntInfant[-1] - eDepIntInfant[-2])
#to test if the last element of the integrated dose is equal to the sum of the timestep doses
testDoseStep = sum(eDepStep)
if testDoseStep == eDepInt[-1]:
print 'Integrated and stepwise dose match for '+str(nuclidename[l]) +'!'
else:
print 'There has been a mistake...'
#Make everything into np arrays
time = np.array(time)
f_hard = np.array(f_hard)
f_dirt = np.array(f_dirt)
#Location factor for buildings: [wood, woodFireproof, concrete]
f_build = np.array(f_build)
eDepInt = np.array(eDepInt)
eDepIntChild = np.array(eDepIntChild)
eDepIntChild4 = np.array(eDepIntChild4)
eDepIntInfant = np.array(eDepIntInfant)
eDepStep = np.array(eDepStep)
eDepStepChild = np.array(eDepStepChild)
eDepStepChild4 = np.array(eDepStepChild4)
eDepStepInfant = np.array(eDepStepInfant)
OFhard = np.array(OFhard)
OFdirt = np.array(OFdirt)
OFbuild = np.array(OFbuild)
TotDoseStepOLD = []
TotDoseIntOLD = []
TotDoseStepNEW = []
TotDoseIntNEW = []
#Index explanation:
#n=0 : NEW
#n=1 : outdoor worker
#n=2 : indoor worker
#n=3 : child 10 years old
#n=4 : child 1 year old
#m=0 : wood house 1-3 storeys
#m=1 : wood house 1-3 storeys fireproof
#m=2 : concrete house multi-storey
#Calculate dose values taking into account location factors
#for integrated dose
#in the case of UNSCEAR2013
for n in range(len(OFhard)):
#[new,out,in,child10,child1]
if n==0:
#print len(eDepStep)
TotDoseStepNEW = np.array(OFhard[n]*f_newOUT*eDepStep+OFdirt[n]*f_newOUT*eDepStep+OFbuild[n]*f_newIN*eDepStep)
for k in range(len(TotDoseStepNEW)):
if k==0:
TotDoseIntNEW.append(TotDoseStepNEW[k])
elif k>0:
TotDoseIntNEW.append(TotDoseStepNEW[k]+TotDoseIntNEW[k-1])
elif n>0 and n<3:
for m in range(len(f_build)):
#[wood, woodFireproof, concrete]
TotDoseStepOLD.append(np.array(OFhard[n]*f_hard*eDepStep+OFdirt[n]*f_dirt*eDepStep+OFbuild[n]*f_build[m]*eDepStep))
#print "Length is " + str(len(TotDoseStepOLD))
#print len(TotDoseStepOLD[0])
#print "f_hard: " + str(len(f_hard))
#print "eDepStep: " + str(len(eDepStep))
TotDoseIntOLD.append([])
for k in range(len(TotDoseStepOLD[0])):
#print len(TotDoseStepOLD[0])
if k==0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[-1][k])
elif k>0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[-1][k]+TotDoseIntOLD[-1][k-1])
#Calculation for a Child
elif n==3:
for m in range(len(f_build)):
#[wood, woodFireproof, concrete]
TotDoseStepOLD.append(np.array(OFhard[n]*f_hard*eDepStepChild+OFdirt[n]*f_dirt*eDepStep+OFbuild[n]*f_build[m]*eDepStep))
TotDoseIntOLD.append([])
for k in range(0, min(numberT10years,len(TotDoseStepOLD[0]))):
if k==0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[-1][k])
elif k>0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[-1][k]+TotDoseIntOLD[-1][k-1])
for k in range(numberT10years, len(TotDoseStepOLD[0])):
if m==0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[3][k]+TotDoseIntOLD[-1][k-1])
elif m==1:
TotDoseIntOLD[-1].append(TotDoseStepOLD[4][k]+TotDoseIntOLD[-1][k-1])
elif m==2:
TotDoseIntOLD[-1].append(TotDoseStepOLD[5][k]+TotDoseIntOLD[-1][k-1])
else:
print 'There has been a mistake when calculating child dose'
#Calculation for an infant
elif n==4:
for m in range(len(f_build)):
#[wood, woodFireproof, concrete]
TotDoseStepOLD.append(np.array(OFhard[n]*f_hard*eDepStepInfant+OFdirt[n]*f_dirt*eDepStep+OFbuild[n]*f_build[m]*eDepStep))
TotDoseIntOLD.append([])
for k in range(0, min(numberT5years,len(TotDoseStepOLD[0]))):
if k==0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[-1][k])
elif k>0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[-1][k]+TotDoseIntOLD[-1][k-1])
for k in range(numberT5years, min(numberT15years, len(TotDoseStepOLD[0]))):
# This is for the age between 5 and 15: Use child dose
if m==0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[6][k]+TotDoseIntOLD[-1][k-1])
elif m==1:
TotDoseIntOLD[-1].append(TotDoseStepOLD[7][k]+TotDoseIntOLD[-1][k-1])
elif m==2:
TotDoseIntOLD[-1].append(TotDoseStepOLD[8][k]+TotDoseIntOLD[-1][k-1])
else:
print 'There has been a mistake when calculating infant dose between 5 and 15 years'
for k in range(numberT15years, len(TotDoseStepOLD[0])):
#This is for the age 15 and over
if m==0:
TotDoseIntOLD[-1].append(TotDoseStepOLD[3][k]+TotDoseIntOLD[-1][k-1])
elif m==1:
TotDoseIntOLD[-1].append(TotDoseStepOLD[4][k]+TotDoseIntOLD[-1][k-1])
elif m==2:
TotDoseIntOLD[-1].append(TotDoseStepOLD[5][k]+TotDoseIntOLD[-1][k-1])
else:
print 'There has been a mistake when calculating infant dose over 15 years'
#all raw dose values are in nSv/kBq/m^2 or 10^(-12)Sv/Bq
#Make everything into numpy arrays and adjust units
#in E-12 Sv/Bq/m^2
TotDoseStepNEW = np.array(TotDoseStepNEW)
TotDoseStepOLD = np.array(TotDoseStepOLD)
#in E-9 Sv/Bq/m^2
TotDoseIntNEW = np.array(TotDoseIntNEW)
TotDoseIntNEW = 0.001 * TotDoseIntNEW
TotDoseIntOLD = np.array(TotDoseIntOLD)
TotDoseIntOLD = 0.001 * TotDoseIntOLD
#make a directory for the graps and files
script_dir = os.path.dirname(__file__)
plots_dir = os.path.join(script_dir, 'plots/'+str(nuclidename[l])+str(T))
results_dir = os.path.join(script_dir, 'files/'+str(nuclidename[l])+str(T))
if not os.path.isdir(plots_dir):
os.makedirs(plots_dir)
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
#Make an output file with all the data and input information: highligts
#handy helper arrays and variables for plotting and output:
Buildings = ["1-3 storey house, wood", "1-3 storey house, fireproof wood","multi-storey house, concrete"]
OutTime = [0.2, 0.84, 1., 2.]
OutTime = np.array(OutTime)
OutTimeText = ['1 week', '1 month', '1 year', '2 years']
annualDoseTime = 100.
annualDoseTimeStart = annualDoseTime-1.
outfile = open("files/"+ str(nuclidename[l])+str(T)+"/Summary"+str(nuclidename[l])+"_"+str(T)+"years.txt",'w')
outfile.write("Summary of results for doses and inputs\n")
outfile.write("\ne_dep after " +str(T)+" years, before the application of reduction factors\n")
outfile.write('{:} {:} {:}\n'.format('Adult','Child (10 years)','Infant (1 year)'))
outfile.write('{:} {:} {:}\n'.format(IntegratedDose,IntegratedDoseChild,IntegratedDoseInfant))
outfile.write("\nEffective dose [nSv/Bq/$m^2] after " +str(T)+" years\n")
outfile.write('{:}'.format('Simplified methodology'))
outfile.write('\n{:}'.format(TotDoseIntNEW[-1]))
outfile.write('\n{:} {:} {:} {:}'.format('Outdoor worker','Indoor worker','Child (10 years)', 'Infant (1 year)'))
for a in range(len(Buildings)):
outfile.write("\n"+str(Buildings[a]))
outfile.write('\n{:} {:} {:} {:}'.format(TotDoseIntOLD[a][-1], TotDoseIntOLD[a+3][-1], TotDoseIntOLD[a+6][-1], TotDoseIntOLD[a+9][-1]))
for c in range(len(OutTime)):
if OutTime[c] <= T:
outfile.write("\n\nEffective dose [nSv/Bq/$m^2] after "+str(OutTimeText[c])+"\n (actual time " +str(time[time<0.02][-1])+" years)")
outfile.write('\n{:}'.format('Simplified methodology'))
outfile.write('\n{:}'.format(TotDoseIntNEW[time<OutTime[c]][-1]))
outfile.write('\n{:} {:} {:} {:}'.format('Outdoor worker','Indoor worker','Child (10 years)', 'Infant (1 year)'))
for a in range(len(Buildings)):
outfile.write("\n"+str(Buildings[a]))
outfile.write('\n{:} {:} {:} {:}'.format(TotDoseIntOLD[a][time<OutTime[c]][-1], TotDoseIntOLD[a+3][time<OutTime[c]][-1], TotDoseIntOLD[a+6][time<OutTime[c]][-1], TotDoseIntOLD[a+9][time<OutTime[c]][-1]))
elif OutTime[c] >= T:
print "could not calculate the annual dose for " +str(OutTime[c]) + "years."
for b in range(len(OutTime)):
if OutTime[b] <= T:
outfile.write("\n\nDose rate [nSv/Bq/$m^2/day] after "+str(OutTimeText[b])+"\n (actual time " +str(time[time<0.02][-1])+" years)")
outfile.write('\n{:}'.format('Simplified methodology'))
outfile.write('\n{:}'.format(TotDoseStepNEW[time<OutTime[b]][-1]))
outfile.write('\n{:} {:} {:} {:}'.format('Outdoor worker','Indoor worker','Child (10 years)', 'Infant (1 year)'))
for a in range(len(Buildings)):
outfile.write("\n"+str(Buildings[a]))
outfile.write('\n{:} {:} {:} {:}'.format(TotDoseStepOLD[a][time<OutTime[b]][-1], TotDoseStepOLD[a+3][time<OutTime[b]][-1], TotDoseStepOLD[a+6][time<OutTime[b]][-1], TotDoseStepOLD[a+9][time<OutTime[b]][-1]))
elif OutTime[b] >= T:
print "could not calculate the dose rate after " +str(OutTime[b]) + "years."
if annualDoseTime <= T:
outfile.write("\n\nEffective dose [nSv/Bq/$m^2] in year "+str(annualDoseTime))
outfile.write('\n{:}'.format('Simplified methodology'))
outfile.write('\n{:}'.format(TotDoseIntNEW[time<annualDoseTime][-1]-TotDoseIntNEW[time<(annualDoseTimeStart)][-1]))
outfile.write('\n{:} {:} {:} {:}'.format('Outdoor worker','Indoor worker','Child (10 years)', 'Infant (1 year)'))
for a in range(len(Buildings)):
outfile.write("\n"+str(Buildings[a]))
outfile.write('\n{:} {:} {:} {:}'.format(TotDoseIntOLD[a][time<annualDoseTime][-1]-TotDoseIntOLD[a][time<(annualDoseTimeStart)][-1], TotDoseIntOLD[a+3][time<annualDoseTime][-1]-TotDoseIntOLD[a+3][time<(annualDoseTimeStart)][-1], TotDoseIntOLD[a+6][time<annualDoseTime][-1]-TotDoseIntOLD[a+6][time<(annualDoseTimeStart)][-1], TotDoseIntOLD[a+9][time<annualDoseTime][-1]-TotDoseIntOLD[a+9][time<(annualDoseTimeStart)][-1]))
elif annualDoseTime >= T:
print "could not calculate the annual dose in year " +str(annualDoseTime)
outfile.close()
#Make an output file with all the data and input information: Location factors over time
outfile = open("files/"+ str(nuclidename[l])+str(T)+"/LocationFactors"+str(nuclidename[l])+"_"+str(T)+"years.txt",'w')
outfile.write("#Output file location factors over time as used in UNSCEAR 2013\n")
outfile.write('#{:} {:} {:} {:} {:} {:}\n'.format('Time [y]','f_build','f_hard','f_build(wood)','f_build(woodFireproof)','f_build(concrete)'))
for i in range(len(time)):
outfile.write("{:} {:} {:} {:} {:} {:}\n".format(time[i],f_hard[i],f_dirt[i],f_build[0][i],f_build[1][i],f_build[2][i]))
outfile.close()
#Make an output file with all the data and input information: Integrated Dose
outfile = open("files/"+str(nuclidename[l])+str(T)+"/depositionTotalDoseOutput_"+str(nuclidename[l])+"_"+str(T)+"years.txt",'w')
outfile.write("#Output file for comparison of UNSCEAR 2013 and UNSCEAR 2016 methodology for calculating dose due to deposition\n")
#outfile.write("#List of input variables:\n")
outfile.write('#Effective dose rate coefficient for {:} = {:} nSv/kBq/m^2/y\n'.format(nuclidename[l],e_dot[l][0]))
outfile.write('#{:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}\n'.format('Time [y]', 'D_tot(simplified)[nSv/Bq/$m^2]', 'D_tot(wood_outdoorW)', 'D_tot(wood_indoorW)','D_tot(wood_Child10)','D_tot(wood_Child1)','D_tot(woodFireproof_outdoorW)', 'D_tot(woodFireproof_indoorW)','D_tot(woodFireproof_Child10)','D_tot(woodFireproof_Child1)','D_tot(concrete_outdoorW)', 'D_tot(concrete_indoorW)','D_tot(concrete_Child10)','D_tot(concrete_Child1)'))
for i in range(len(time)):
outfile.write("{:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}\n".format(time[i],TotDoseIntNEW[i],TotDoseIntOLD[0][i],TotDoseIntOLD[3][i],TotDoseIntOLD[6][i],TotDoseIntOLD[9][i],TotDoseIntOLD[1][i],TotDoseIntOLD[4][i],TotDoseIntOLD[7][i],TotDoseIntOLD[10][i],TotDoseIntOLD[2][i],TotDoseIntOLD[5][i],TotDoseIntOLD[8][i],TotDoseIntOLD[11][i]))
outfile.close()
#Make an output file with all the data and input information: Dose Rate
outfile = open("files/"+str(nuclidename[l])+str(T)+"/depositionDoseRateOutput_"+str(nuclidename[l])+"_"+str(T)+"years.txt",'w')
outfile.write("#Output file for comparison of UNSCEAR 2013 and UNSCEAR 2016 methodology for calculating dose due to deposition\n")
#outfile.write("#List of input variables:\n")
outfile.write('#Effective dose rate coefficient for {:} = {:} nSv/kBq/m^2/y\n'.format(nuclidename[l],e_dot[l][0]))
outfile.write('#{:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}\n'.format('Time [y]', 'D_rate(simplified)[nSv/Bq/$m^2/day]', 'D_rate(wood_outdoorW)', 'D_rate(wood_indoorW)','D_rate(wood_Child10)','D_rate(wood_Child1)','D_rate(woodFireproof_outdoorW)', 'D_rate(woodFireproof_indoorW)','D_rate(woodFireproof_Child10)','D_rate(woodFireproof_Child1)','D_rate(concrete_outdoorW)', 'D_rate(concrete_indoorW)','D_rate(concrete_Child10)','D_rate(concrete_Child1)'))
for i in range(len(time)):
outfile.write("{:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:} {:}\n".format(time[i],TotDoseStepNEW[i],TotDoseStepOLD[0][i],TotDoseStepOLD[3][i],TotDoseStepOLD[6][i],TotDoseStepOLD[9][i],TotDoseStepOLD[1][i],TotDoseStepOLD[4][i],TotDoseStepOLD[7][i],TotDoseStepOLD[10][i],TotDoseStepOLD[2][i],TotDoseStepOLD[5][i],TotDoseStepOLD[8][i],TotDoseStepOLD[11][i]))
outfile.close()
#Plot all graphs
#location factors
plt.plot(time,f_hard, label=r'$f_{hard}$')
plt.plot(time,f_dirt, label=r'$f_{dirt}$')
plt.plot(time, f_build[0], label=r'$f_{build, wood}$')
            plt.plot(time, f_build[1], label=r'$f_{build, fireproof}$')
plt.plot(time, f_build[2], label=r'$f_{build,concrete}$',markevery=mark_step_num)
plt.title('Location factors')
plt.ylabel('Location factor')
plt.xlabel('Time [y]')
plt.xscale('log')
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/LocationFactorsOverTime'+'.'+str(filetype))
#plt.show()
plt.close()
#make custom plots
# 'Select variables to plot:'
if graphoption[0]==1:
#wood house
plt.plot(time,TotDoseIntNEW, label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[0], label = 'Outdoor worker')
plt.plot(time, TotDoseIntOLD[3], label = 'Indoor worker')
plt.plot(time, TotDoseIntOLD[6], label = 'Child (10 years)')
plt.plot(time, TotDoseIntOLD[9], label = 'Child (1 year)',markevery=mark_step_num)
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n 1-3 storey house, wood. ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseWood'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#wood house, fireproof
plt.plot(time,TotDoseIntNEW, label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[1], label = 'Outdoor worker')
plt.plot(time, TotDoseIntOLD[4], label = 'Indoor worker')
plt.plot(time, TotDoseIntOLD[7], label = 'Child (10 years)')
plt.plot(time, TotDoseIntOLD[10], label = 'Child (1 year)',markevery=mark_step_num)
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n 1-3 storey house, fireproof wood. ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseFireproofWood'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#concrete house, multi-storey
plt.plot(time,TotDoseIntNEW, label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[2], label = 'Outdoor worker')
plt.plot(time, TotDoseIntOLD[5], label = 'Indoor worker')
plt.plot(time, TotDoseIntOLD[8], label = 'Child (10 years)')
plt.plot(time, TotDoseIntOLD[11], label = 'Child (1 year)',markevery=mark_step_num)
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n multi-storey house, concrete. ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseConcrete'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Dose rates
#wood house
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW, label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[0], label = 'Outdoor worker')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[3], label = 'Indoor worker')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[6], label = 'Child (10 years)')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[9], label = 'Child (1 year)',markevery=mark_step_num)
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n 1-3 storey house, wood. ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateWood'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#wood house, fireproof
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW, label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[1], label = 'Outdoor worker')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[4], label = 'Indoor worker')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[7], label = 'Child (10 years)')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[10], label = 'Child (1 year)',markevery=mark_step_num)
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n 1-3 storey house, fireproof wood. ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateFireproofWood'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#concrete
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW , label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[2], label = 'Outdoor worker')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[5], label = 'Indoor worker')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[8], label = 'Child (10 years)')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[11], label = 'Child (1 year)',markevery=mark_step_num)
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n multi-storey house, concrete. ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateConcrete'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
if graphoption[1]==1:
#Total doses
#Outdoor worker
plt.plot(time,TotDoseIntNEW , label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[0], label = '1-3 storey house, wood')
plt.plot(time, TotDoseIntOLD[1], label = '1-3 storey house, fireproof wood')
                plt.plot(time, TotDoseIntOLD[2], label = 'multi-storey house, concrete')
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n Outdoor worker. ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseOutdoorWorker'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Indoor worker
plt.plot(time,TotDoseIntNEW , label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[3], label = '1-3 storey house, wood')
plt.plot(time, TotDoseIntOLD[4], label = '1-3 storey house, fireproof wood')
plt.plot(time, TotDoseIntOLD[5], label = 'multi-storey house, concrete')
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n Indoor worker. ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseIndoorWorker'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Child 10 years
plt.plot(time,TotDoseIntNEW , label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[6], label = '1-3 storey house, wood')
plt.plot(time, TotDoseIntOLD[7], label = '1-3 storey house, fireproof wood')
plt.plot(time, TotDoseIntOLD[8], label = 'multi-storey house, concrete')
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n Child (10 years). ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseChild10yr'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Child 1 year
plt.plot(time,TotDoseIntNEW , label = 'UNSCEAR 2016')
plt.plot(time, TotDoseIntOLD[9], label = '1-3 storey house, wood')
plt.plot(time, TotDoseIntOLD[10], label = '1-3 storey house, fireproof wood')
plt.plot(time, TotDoseIntOLD[11], label = 'multi-storey house, concrete')
plt.title(r'Effective dose ($e_{dep}$) due to ' + str(nuclidename[l]) + ' deposition, \n Child (1 year). ')
plt.ylabel(r'Effective Dose [$10^{-9}$ Sv/Bq/$m^2$]')
plt.xlabel('Time [y]')
plt.xscale(str(totdoseplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/IntDoseChild1yr'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#dose rates
#Outdoor worker
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW , label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[0], label = '1-3 storey house, wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[1], label = '1-3 storey house, fireproof wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[2], label = 'multi-storey house, concrete')
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n Outdoor worker. ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateOutdoorWorker'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Indoor worker
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW , label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[3], label = '1-3 storey house, wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[4], label = '1-3 storey house, fireproof wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[5], label = 'multi-storey house, concrete')
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n Indoor worker. ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateIndoorWorker'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Child 10 years
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW , label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[6], label = '1-3 storey house, wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[7], label = '1-3 storey house, fireproof wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[8], label = 'multi-storey house, concrete')
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n Child (10 years). ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateChild10yr'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
#Child 1 year
plt.plot(time,(timestep/(1/365.))*TotDoseStepNEW , label = 'UNSCEAR 2016')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[9], label = '1-3 storey house, wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[10], label = '1-3 storey house, fireproof wood')
plt.plot(time, (timestep/(1/365.))*TotDoseStepOLD[11], label = 'multi-storey house, concrete')
plt.title(r'Dose rate due to ' + str(nuclidename[l]) + ' deposition, \n Child (1 year). ')
plt.ylabel(r'Dose rate [$10^{-9}$ Sv/Bq/$m^2$/day]')
plt.xlabel('Time [y]')
plt.xscale(str(doserateplottype))
plt.legend(loc ='best')
plt.savefig('plots/'+str(nuclidename[l])+str(T)+'/DoseRateChild1yr'+str(nuclidename[l]) +'.'+str(filetype))
#plt.show()
plt.close()
elif nuclide[l]==0:
print('Skipping ' + str(nuclidename[l]) + ' since it was not selected!')
return
| gpl-3.0 |
evgchz/scikit-learn | sklearn/pipeline.py | 12 | 16522 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Parameters
----------
steps: list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
"""
# BaseEstimator interface
def __init__(self, steps):
self.named_steps = dict(steps)
names, estimators = zip(*steps)
if len(self.named_steps) != len(steps):
raise ValueError("Names provided are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(zip(names, estimators))
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps.copy()
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
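# Illustrative example of the routing above: calling
# pipe.fit(X, y, svc__C=1.0) on a pipeline with a step named 'svc'
# yields fit_params_steps['svc'] == {'C': 1.0}.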
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator."""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
def predict_log_proba(self, X):
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform."""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
def inverse_transform(self, X):
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score."""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
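# Illustrative sketch (not part of the public API): given two StandardScaler
# instances and one PCA, _name_estimators returns
# [('standardscaler-1', ...), ('standardscaler-2', ...), ('pca', ...)].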
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
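Examples
--------
A minimal usage sketch (the transformer choices are illustrative; any
estimators implementing ``fit``/``transform`` can be combined):
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> union = FeatureUnion([("pca", PCA(n_components=1)),
...                       ("svd", TruncatedSVD(n_components=2))])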
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/saliency_test.py | 2 | 12493 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >> https://github.com/PAIR-code/saliency
import os, time
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import saliency
from matplotlib import pylab as plt
import PIL.Image
from mpl_toolkits.axes_grid1 import make_axes_locatable  # used by ShowDivergingImage()
# Boilerplate methods.
def ShowImage(im, title='', ax=None):
if ax is None:
plt.figure()
plt.axis('off')
im = ((im + 1) * 127.5).astype(np.uint8)
plt.imshow(im)
plt.title(title)
def ShowGrayscaleImage(im, title='', ax=None):
if ax is None:
plt.figure()
plt.axis('off')
plt.imshow(im, cmap=plt.cm.gray, vmin=0, vmax=1)
plt.title(title)
def ShowHeatMap(im, title, ax=None):
if ax is None:
plt.figure()
plt.axis('off')
plt.imshow(im, cmap=plt.cm.inferno)
plt.title(title)
def ShowDivergingImage(grad, title='', percentile=99, ax=None):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
plt.axis('off')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
im = ax.imshow(grad, cmap=plt.cm.coolwarm, vmin=-1, vmax=1)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.title(title)
def LoadImage(file_path):
im = PIL.Image.open(file_path)
im = np.asarray(im)
return im / 127.5 - 1.0
# REF [site] >> https://github.com/PAIR-code/saliency/blob/master/Examples.ipynb
def simple_example():
#--------------------
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
#--------------------
# Define a model.
num_classes = 10
input_shape = (None, 28, 28, 1) # 784 = 28 * 28.
output_shape = (None, num_classes)
input_ph = tf.placeholder(tf.float32, shape=input_shape, name='input_ph')
output_ph = tf.placeholder(tf.float32, shape=output_shape, name='output_ph')
with tf.variable_scope('conv1', reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv2d(input_ph, 32, 5, activation=tf.nn.relu, name='conv')
conv1 = tf.layers.max_pooling2d(conv1, 2, 2, name='maxpool')
with tf.variable_scope('conv2', reuse=tf.AUTO_REUSE):
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu, name='conv')
conv2 = tf.layers.max_pooling2d(conv2, 2, 2, name='maxpool')
with tf.variable_scope('fc1', reuse=tf.AUTO_REUSE):
fc1 = tf.layers.flatten(conv2, name='flatten')
fc1 = tf.layers.dense(fc1, 1024, activation=tf.nn.relu, name='dense')
with tf.variable_scope('fc2', reuse=tf.AUTO_REUSE):
model_output = tf.layers.dense(fc1, num_classes, activation=tf.nn.softmax, name='dense')
#--------------------
# Train.
loss = tf.reduce_mean(-tf.reduce_sum(output_ph * tf.log(model_output), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Start training...')
start_time = time.time()
for idx in range(2000):
batch_xs, batch_ys = mnist.train.next_batch(512)
batch_xs = np.reshape(batch_xs, (-1,) + input_shape[1:])
sess.run(train_step, feed_dict={input_ph: batch_xs, output_ph: batch_ys})
if 0 == idx % 100: print('.', end='', flush=True)
print()
print('End training: {} secs.'.format(time.time() - start_time))
#--------------------
# Evaluate.
correct_prediction = tf.equal(tf.argmax(model_output, 1), tf.argmax(output_ph, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Start testing...')
start_time = time.time()
acc = sess.run(accuracy, feed_dict={input_ph: np.reshape(mnist.test.images, (-1,) + input_shape[1:]), output_ph: mnist.test.labels})
print('Test accuracy = {}.'.format(acc))
print('End testing: {} secs.'.format(time.time() - start_time))
if acc < 0.95:
print('Failed to train...')
return
#--------------------
# Visualize.
images = np.reshape(mnist.test.images, (-1,) + input_shape[1:])
img = images[0]
minval, maxval = np.min(img), np.max(img)
img_scaled = np.squeeze((img - minval) / (maxval - minval), axis=-1)
# Construct the scalar neuron tensor.
logits = model_output
neuron_selector = tf.placeholder(tf.int32)
y = logits[0][neuron_selector]
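# 'y' is the scalar output neuron for which the saliency maps are computed;
# the concrete class index is supplied at run time through
# feed_dict={neuron_selector: prediction_class} in the GetMask() calls below.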
# Construct a tensor for predictions.
prediction = tf.argmax(logits, 1)
# Make a prediction.
prediction_class = sess.run(prediction, feed_dict={input_ph: [img]})[0]
#--------------------
start_time = time.time()
saliency_obj = saliency.Occlusion(sess.graph, sess, y, input_ph)
print('Occlusion: {} secs.'.format(time.time() - start_time))
# NOTE [info] >> An error exists in GetMask() of ${Saliency_HOME}/saliency/occlusion.py.
# <before>
# occlusion_window = np.array([size, size, x_value.shape[2]])
# occlusion_window.fill(value)
# <after>
# occlusion_window = np.full([size, size, x_value.shape[2]], value)
mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
mask_gray = saliency.VisualizeImageGrayscale(mask_3d)
mask_div = saliency.VisualizeImageDiverging(mask_3d)
fig = plt.figure()
ax = plt.subplot(1, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(1, 3, 2)
ax.imshow(mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Grayscale')
ax = plt.subplot(1, 3, 3)
ax.imshow(mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Diverging')
fig.suptitle('Occlusion', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_occlusion.png')
plt.show()
#--------------------
start_time = time.time()
conv_layer = sess.graph.get_tensor_by_name('conv2/conv/BiasAdd:0')
saliency_obj = saliency.GradCam(sess.graph, sess, y, input_ph, conv_layer)
print('GradCam: {} secs.'.format(time.time() - start_time))
mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
mask_gray = saliency.VisualizeImageGrayscale(mask_3d)
mask_div = saliency.VisualizeImageDiverging(mask_3d)
fig = plt.figure()
ax = plt.subplot(1, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(1, 3, 2)
ax.imshow(mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Grayscale')
ax = plt.subplot(1, 3, 3)
ax.imshow(mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Diverging')
fig.suptitle('Grad-CAM', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_gradcam.png')
plt.show()
#--------------------
start_time = time.time()
saliency_obj = saliency.GradientSaliency(sess.graph, sess, y, input_ph)
print('GradientSaliency: {} secs.'.format(time.time() - start_time))
vanilla_mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
smoothgrad_mask_3d = saliency_obj.GetSmoothedMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
vanilla_mask_gray = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
smoothgrad_mask_gray = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
vanilla_mask_div = saliency.VisualizeImageDiverging(vanilla_mask_3d)
smoothgrad_mask_div = saliency.VisualizeImageDiverging(smoothgrad_mask_3d)
fig = plt.figure()
ax = plt.subplot(2, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(2, 3, 2)
ax.imshow(vanilla_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Grayscale')
ax = plt.subplot(2, 3, 3)
ax.imshow(smoothgrad_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Grayscale')
ax = plt.subplot(2, 3, 5)
ax.imshow(vanilla_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Diverging')
ax = plt.subplot(2, 3, 6)
ax.imshow(smoothgrad_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Diverging')
fig.suptitle('Gradient Saliency', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_gradientsaliency.png')
plt.show()
#--------------------
start_time = time.time()
saliency_obj = saliency.GuidedBackprop(sess.graph, sess, y, input_ph)
print('GuidedBackprop: {} secs.'.format(time.time() - start_time))
vanilla_mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
smoothgrad_mask_3d = saliency_obj.GetSmoothedMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
vanilla_mask_gray = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
smoothgrad_mask_gray = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
vanilla_mask_div = saliency.VisualizeImageDiverging(vanilla_mask_3d)
smoothgrad_mask_div = saliency.VisualizeImageDiverging(smoothgrad_mask_3d)
fig = plt.figure()
ax = plt.subplot(2, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(2, 3, 2)
ax.imshow(vanilla_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Grayscale')
ax = plt.subplot(2, 3, 3)
ax.imshow(smoothgrad_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Grayscale')
ax = plt.subplot(2, 3, 4)
ax.imshow(vanilla_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Diverging')
ax = plt.subplot(2, 3, 5)
ax.imshow(smoothgrad_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Diverging')
fig.suptitle('Guided Backprop', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_guidedbackprop.png')
plt.show()
#--------------------
start_time = time.time()
saliency_obj = saliency.IntegratedGradients(sess.graph, sess, y, input_ph)
print('IntegratedGradients: {} secs.'.format(time.time() - start_time))
vanilla_mask_3d = saliency_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
smoothgrad_mask_3d = saliency_obj.GetSmoothedMask(img, feed_dict={neuron_selector: prediction_class})
# Compute a 2D tensor for visualization.
vanilla_mask_gray = saliency.VisualizeImageGrayscale(vanilla_mask_3d)
smoothgrad_mask_gray = saliency.VisualizeImageGrayscale(smoothgrad_mask_3d)
vanilla_mask_div = saliency.VisualizeImageDiverging(vanilla_mask_3d)
smoothgrad_mask_div = saliency.VisualizeImageDiverging(smoothgrad_mask_3d)
fig = plt.figure()
ax = plt.subplot(2, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(2, 3, 2)
ax.imshow(vanilla_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Grayscale')
ax = plt.subplot(2, 3, 3)
ax.imshow(smoothgrad_mask_gray, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Grayscale')
ax = plt.subplot(2, 3, 4)
ax.imshow(vanilla_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Vanilla Diverging')
ax = plt.subplot(2, 3, 5)
ax.imshow(smoothgrad_mask_div, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('SmoothGrad Diverging')
fig.suptitle('Integrated Gradients', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_integratedgradients.png')
plt.show()
#--------------------
start_time = time.time()
xrai_obj = saliency.XRAI(sess.graph, sess, y, input_ph)
print('XRAI: {} secs.'.format(time.time() - start_time))
if True:
xrai_attributions = xrai_obj.GetMask(img, feed_dict={neuron_selector: prediction_class})
else:
# Create XRAIParameters and set the algorithm to fast mode which will produce an approximate result.
xrai_params = saliency.XRAIParameters()
xrai_params.algorithm = 'fast'
xrai_attributions_fast = xrai_obj.GetMask(img, feed_dict={neuron_selector: prediction_class}, extra_parameters=xrai_params)
xrai_attributions = xrai_attributions_fast  # keep the name used below defined in this branch
# Show most salient 30% of the image.
mask = xrai_attributions > np.percentile(xrai_attributions, 70)
img_masked = img_scaled.copy()
img_masked[~mask] = 0
fig = plt.figure()
ax = plt.subplot(1, 3, 1)
ax.imshow(img_scaled, cmap=plt.cm.gray, vmin=0, vmax=1)
ax.axis('off')
ax.set_title('Input')
ax = plt.subplot(1, 3, 2)
ax.imshow(xrai_attributions, cmap=plt.cm.inferno)
ax.axis('off')
ax.set_title('XRAI Attributions')
ax = plt.subplot(1, 3, 3)
ax.imshow(img_masked, cmap=plt.cm.gray)
ax.axis('off')
ax.set_title('Masked Input')
fig.suptitle('XRAI', fontsize=16)
fig.tight_layout()
#plt.savefig('./saliency_xrai.png')
plt.show()
def main():
simple_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
bmorris3/numpy | numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
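# (Each column of the Vandermonde matrix is normalised to unit Euclidean
# norm here; the coefficients are rescaled back after lstsq via c.T/scale.)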
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
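# Horner's scheme: y = (...((p[0]*x + p[1])*x + p[2])*x + ...) + p[N-1]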
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
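# Classical long division: at step k the leading coefficient of the running
# remainder r is eliminated by subtracting d*v; d is the next quotient entry.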
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
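# Helper used by poly1d.__str__: lifts the exponents written as '**k' onto a
# separate line above the terms, producing the two-line pretty-printed output
# shown in the poly1d docstring examples.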
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
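# As the class docstring shows, repeated polymul gives the usual polynomial
# power, e.g. np.poly1d([1, 2, 3])**2 -> poly1d([ 1,  4, 10, 12,  9]).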
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
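# Illustration (standard calculus, not taken from the docstrings): for
# p = poly1d([1, 2, 3]), i.e. x**2 + 2x + 3,
#   p.deriv() represents 2x + 2 (coefficients [2, 2]), and
#   p.integ() represents x**3/3 + x**2 + 3x plus the integration constant k,
# so p.integ().deriv() recovers p.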
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
eclee25/flu-SDI-exploratory-age | scripts/ORincid_allweeks.py | 1 | 4225 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 9/15/13
###Function: OR by week and child & adult incidence by week with two axes
#### Allows the OR values to be compared against the magnitude of infection in children and adults
###Import data: OR_allweeks.csv
###Command Line: python
##############################################
### notes ###
### packages/modules ###
import csv
import numpy as np
import matplotlib.pyplot as plt
import sys
## local modules ##
import ORgenerator as od
### data structures ###
# ilidict[(week, age marker)] = ILI
# wkdict[week] = seasonnum
# weeks = list of unique weeks in the data
# ORdict[week] = OR
# ageARdict[week] = (child attack rate per 100,000, adult attack rate per 100,000)
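# Illustrative access patterns for the structures above (the age-marker value
# 'child' is hypothetical; the real keys come from ORgenerator and the SQL export):
#   ilidict[(week, 'child')] -> ILI count for that week and age group
#   ORdict[week]             -> child:adult odds ratio for that week
#   ageARdict[week]          -> (child AR, adult AR), each per 100,000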
### parameters ###
USchild = 20348657 + 20677194 + 22040343 #US child popn from 2010 Census
USadult = 21585999 + 21101849 + 19962099 + 20179642 + 20890964 + 22708591 + 22298125 + 19664805 #US adult popn from 2010 Census
seasons = range(2,11) #seasons for which ORs will be generated
### plotting settings ###
xlabels = range(40,54)
xlabels.extend(range(1,40))
ORmarker = 'o'
incidmarker = '^'
ORcol = 'black'
chcol = 'red'
adcol = 'blue'
### functions ###
### import data ###
datain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
data=csv.reader(datain, delimiter=',')
ilidict, wkdict, weeks = od.import_dwk(data, 0, 1, 2, 3)
ORdict, ageARdict = od.ORincid_wk(ilidict, weeks)
# OR and attack rate chart (two axes) for each season
for s in seasons:
# wkdummy will represent list of weeks for chart in season to use as key for OR dict
wkdummy = [key for key in sorted(weeks) if wkdict[key] == int(s)]
wkdummy = set(wkdummy) # convert to a set; sorted(wkdummy) is used below because dict values carry no guaranteed order when pulled in the list comprehension
# create two y-axes
fig, yax_OR = plt.subplots()
yax_AR = yax_OR.twinx()
# for seasons with 53 weeks (season 5 only)
if len(wkdummy) == 53:
## OR y-axis
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
chartwks = xrange(len(sorted(wkdummy)))
OR, = yax_OR.plot(chartwks, chartORs, marker = ORmarker, color = ORcol, label = "Odds Ratio", lw = 4, ms = 8)
## incidence y-axis (one line each for child and adult AR)
c_AR = [ageARdict[wk][0] for wk in sorted(wkdummy)]
a_AR = [ageARdict[wk][1] for wk in sorted(wkdummy)]
child, = yax_AR.plot(chartwks, c_AR, marker = incidmarker, color = chcol, label = 'Child Incidence Rate', lw = 3, ms = 8)
adult, = yax_AR.plot(chartwks, a_AR, marker = incidmarker, color = adcol, label = 'Adult Incidence Rate', lw = 3, ms = 8)
## designate legend labels
lines = [OR, child, adult]
yax_OR.legend(lines, [l.get_label() for l in lines], loc = 'upper right')
# for seasons with 52 weeks
else:
## OR y-axis
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
avg53 = (chartORs[12] + chartORs[13])/2
chartORs.insert(13, avg53)
chartwks = xrange(len(sorted(wkdummy)) + 1)
OR, = yax_OR.plot(chartwks, chartORs, marker = ORmarker, color = ORcol, label = "Odds Ratio", lw = 4, ms = 8)
## incidence y-axis
c_AR = [ageARdict[wk][0] for wk in sorted(wkdummy)]
a_AR = [ageARdict[wk][1] for wk in sorted(wkdummy)]
avgc = (c_AR[12] + c_AR[13])/2
avga = (a_AR[12] + a_AR[13])/2
c_AR.insert(13, avgc)
a_AR.insert(13, avga)
child, = yax_AR.plot(chartwks, c_AR, marker = incidmarker, color = chcol, label = 'Child Incidence Rate', lw = 3, ms = 8)
adult, = yax_AR.plot(chartwks, a_AR, marker = incidmarker, color = adcol, label = 'Adult Incidence Rate', lw = 3, ms = 8)
## designate legend labels
lines = [OR, child, adult]
yax_OR.legend(lines, [l.get_label() for l in lines], loc = 'upper right')
## separate flu and off seasons
plt.plot([33, 33], [0, 100], color = 'k', linewidth = 1)
## plot settings
yax_OR.set_xlabel('Week Number, Season ' + str(s), fontsize=24)
yax_OR.set_ylim([0, 10])
yax_OR.set_ylabel('OR, child:adult', fontsize=24)
yax_AR.set_ylim([0, 100])
yax_AR.set_yticks(xrange(0,110,10))
yax_AR.set_ylabel('Incidence Rate per 100,000', fontsize=24)
plt.xlim([0, 33])
plt.xticks(xrange(33), xlabels[:33])
plt.show()
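# Each pass through the loop above produces one two-axis figure per season:
# the left axis carries the child:adult odds ratio and the right axis the
# child and adult rates per 100,000, with a vertical line at x = 33 marking
# the flu-season/off-season boundary noted in the comment above.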
| mit |
logpai/logparser | logparser/SLCT/SLCT.py | 1 | 7250 | """
Description: This file implements a wrapper around the original SLCT code in C
Author: LogPAI team
License: MIT
"""
import sys
sys.path.append('../')
import hashlib
import pandas as pd
import re
from datetime import datetime
from ..logmatch import regexmatch
import subprocess
import os
class LogParser(object):
def __init__(self, indir, outdir, log_format, support, para_j=True, saveLog=False, rex=[]):
self.outdir = outdir
self.log_format = log_format
self.rex = rex
self.para = {}
self.para['dataPath'] = indir
self.para['para_j'] = para_j
self.para['savePath'] = outdir
self.para['support'] = support
self.para['saveLog'] = saveLog
def parse(self, logname):
self.para['dataName'] = logname
SLCT(self.para, self.log_format, self.rex)
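# Illustrative usage (the paths, log_format, and rex shown here are assumptions
# for a generic HDFS-style log, not values taken from this file):
#   parser = LogParser(indir='../logs/HDFS/', outdir='SLCT_result/',
#                      log_format='<Date> <Time> <Pid> <Level> <Component>: <Content>',
#                      support=10, rex=[r'blk_-?\d+'])
#   parser.parse('HDFS_2k.log')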
def SLCT(para, log_format, rex):
startTime = datetime.now() # start timing
logname = os.path.join(para['dataPath'], para['dataName'])
print("Parsing file: {}".format(logname))
# SLCT compilation
if not os.path.isfile('../logparser/SLCT/slct'):
try:
print('Compile SLCT...\n>> gcc -o ../logparser/SLCT/slct -O2 ../logparser/SLCT/cslct.c')
subprocess.check_output('gcc -o ../logparser/SLCT/slct -O2 ../logparser/SLCT/cslct.c',
stderr=subprocess.STDOUT, shell=True)
except:
print("Compile error! Please check GCC installed.\n")
raise
headers, regex = generate_logformat_regex(log_format)
df_log = log_to_dataframe(logname, regex, headers, log_format)
# Generate input file
with open('slct_input.log', 'w') as fw:
for line in df_log['Content']:
if rex:
for currentRex in rex:
line = re.sub(currentRex, '<*>', line)
fw.write(line + '\n')
# Run SLCT command
SLCT_command = extract_command(para, "slct_input.log")
try:
print ("Run SLCT...\n>> {}".format(SLCT_command))
subprocess.check_call(SLCT_command, shell=True)
except:
print("SLCT executable is invalid! Please compile it using GCC.\n")
raise
# Collect and dump templates
tempParameter = TempPara(path = "./", savePath=para['savePath'], logname="slct_input.log")
tempProcess(tempParameter)
matcher = regexmatch.PatternMatch(outdir=para['savePath'], logformat=log_format)
matched_df = matcher.match(logname, "temp_templates.csv")
# sys.exit()
os.remove("slct_input.log")
os.remove("slct_outliers.log")
os.remove("slct_templates.txt")
os.remove("temp_templates.csv")
for idx, line in matched_df.iterrows():
if line['EventTemplate'] == "None":
content = line['Content']
matched_df.loc[idx, "EventTemplate"] = content
matched_df.loc[idx, "EventId"] = hashlib.md5(content.encode('utf-8')).hexdigest()[0:8]
occ_dict = dict(matched_df['EventTemplate'].value_counts())
df_event = pd.DataFrame()
df_event['EventTemplate'] = matched_df['EventTemplate'].unique()
df_event['EventId'] = df_event['EventTemplate'].map(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()[0:8])
df_event['Occurrences'] = df_event['EventTemplate'].map(occ_dict)
df_event.to_csv(os.path.join(para['savePath'], para['dataName'] + "_templates.csv"), index=False, columns=["EventId", "EventTemplate", "Occurrences"])
matched_df.to_csv(os.path.join(para['savePath'], para['dataName'] + "_structured.csv"), index=False)
print('Parsing done. [Time: {!s}]'.format(datetime.now() - startTime))
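# In summary, SLCT() above: (1) compiles the C binary if needed, (2) writes the
# preprocessed message content to slct_input.log (optionally masking with rex),
# (3) runs the slct binary, (4) converts slct_templates.txt into a temporary
# CSV, (5) matches every log line back to a template with
# logmatch.regexmatch.PatternMatch, and (6) writes <dataName>_templates.csv and
# <dataName>_structured.csv to the output directory before removing the
# intermediate files.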
def extract_command(para, logname):
support = para['support']
parajTF = para['para_j']
input = ''
if parajTF:
input = '../logparser/SLCT/slct -j -o ' + 'slct_outliers.log -r -s ' + str(support) + ' ' + logname
else:
input = '../logparser/SLCT/slct -o ' + 'slct_outliers.log -r -s ' + str(support) + ' ' + logname
return input
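# For example, with support=10 and para_j=True this returns
#   '../logparser/SLCT/slct -j -o slct_outliers.log -r -s 10 slct_input.log'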
def log_to_dataframe(log_file, regex, headers, logformat):
''' Function to transform log file to dataframe '''
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
pass
logdf = pd.DataFrame(log_messages, columns=headers)
logdf.insert(0, 'LineId', None)
logdf['LineId'] = [i + 1 for i in range(linecount)]
return logdf
def generate_logformat_regex(logformat):
'''
Function to generate regular expression to split log messages
'''
headers = []
splitters = re.split(r'(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if k % 2 == 0:
splitter = re.sub(' +', r'\\s+', splitters[k])
regex += splitter
else:
header = splitters[k].strip('<').strip('>')
regex += '(?P<%s>.*?)' % header
headers.append(header)
regex = re.compile('^' + regex + '$')
return headers, regex
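# For example, log_format '<Date> <Time> <Content>' yields
#   headers == ['Date', 'Time', 'Content']
#   regex matching '^(?P<Date>.*?)\s+(?P<Time>.*?)\s+(?P<Content>.*?)$'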
class TempPara:
def __init__(self, path='./', logname='rawlog.log', savePath='./', templateName='slct_templates.txt', outlierName='slct_outliers.log'):
self.path = path
self.logname = logname
self.savePath = savePath
self.templateName = templateName
self.outlierName = outlierName
def tempProcess(tempPara):
print('Dumping event templates...')
if not os.path.exists(tempPara.savePath):
os.makedirs(tempPara.savePath)
#read the templates
templates = []
with open('./' + tempPara.templateName) as tl:
for line in tl:
templates.append([0, line.strip(), 0])
pd.DataFrame(templates, columns=["EventId","EventTemplate","Occurrences"]).to_csv("temp_templates.csv", index=False)
def matchTempLog(templates, logs):
len_temp = {}
for tidx, temp in enumerate(templates):
tempL = temp.split()
templen = len(tempL)
if templen not in len_temp:
len_temp[templen] = [(tidx, tempL)]
else:
len_temp[templen].append((tidx, tempL))
logid_groupid = []
for idx, log in enumerate(logs):
logL = log.split()
logid = idx+1
if len(logL) in len_temp:
logid_groupid.append([idx + 1, get_groupid(logL, len_temp[len(logL)])])
else:
logid_groupid.append([idx+1, -1])
return logid_groupid
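# matchTempLog buckets the candidate templates by token count so each log line
# is only scored against templates of the same length; get_groupid below counts
# exact, non-wildcard token matches and returns the index of the best-scoring
# template, while matchTempLog records -1 when no template has the same length.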
def get_groupid(logL, tempLs):
maxvalue = -1
for templ in tempLs:
starnum = 0
shot = 0
for idx, token in enumerate(logL):
if token == templ[1][idx] or templ[1][idx].count("*"):
shot += 1
if templ[1][idx].count("*"):
starnum += 1
shot = shot - starnum
if shot > maxvalue:
maxvalue = shot
groupid = templ[0]
return groupid
| mit |