repo_name (stringlengths 7-90) | path (stringlengths 4-191) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 762-838k) | license (stringclasses 15 values)
---|---|---|---|---|---|
sanketloke/scikit-learn | sklearn/utils/setup.py | 24 | 2920 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
apache/spark | python/pyspark/pandas/data_type_ops/num_ops.py | 6 | 21133 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from typing import cast, Any, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.base import column_op, IndexOpsMixin, numpy_column_op
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
is_valid_operand_for_numeric_arithmetic,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.internal import InternalField
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import (
BooleanType,
StringType,
TimestampType,
)
class NumericOps(DataTypeOps):
"""The class for binary operations of numeric pandas-on-Spark objects."""
@property
def pretty_name(self) -> str:
return "numerics"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("string addition can only be applied to string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__add__)(left, right)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("subtraction can not be applied to string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__sub__)(left, right)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("modulo can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("modulo can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def mod(left: Column, right: Any) -> Column:
return ((left % right) + right) % right
return column_op(mod)(left, right)
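# A worked example of the double modulo above: Spark's % keeps the sign of the dividend,
# so for left = -7 and right = 3 a plain left % right gives -1, while ((-1) + 3) % 3 == 2,
# matching the pandas/Python convention that -7 % 3 == 2.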
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("exponentiation can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("exponentiation can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def pow_func(left: Column, right: Any) -> Column:
return F.when(left == 1, left).otherwise(Column.__pow__(left, right))
return column_op(pow_func)(left, right)
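# Note on the special case above: returning `left` unchanged when left == 1 mirrors the
# pandas/NumPy convention that 1 ** x == 1 even when x is NaN, which a plain
# Column.__pow__ would not preserve (a null/NaN exponent would otherwise propagate).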
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("string addition can only be applied to string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__radd__)(left, right)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("subtraction can not be applied to string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rsub__)(left, right)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if not isinstance(right, numbers.Number):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rmul__)(left, right)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("exponentiation can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("exponentiation can not be applied to given types.")
def rpow_func(left: Column, right: Any) -> Column:
return F.when(SF.lit(right == 1), right).otherwise(Column.__rpow__(left, right))
right = transform_boolean_operand_to_numeric(right)
return column_op(rpow_func)(left, right)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("modulo can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("modulo can not be applied to given types.")
def rmod(left: Column, right: Any) -> Column:
return ((right % left) + left) % left
right = transform_boolean_operand_to_numeric(right)
return column_op(rmod)(left, right)
# TODO(SPARK-36003): Implement unary operator `invert` as below
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise NotImplementedError("Unary ~ can not be applied to %s." % self.pretty_name)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
from pyspark.pandas.base import column_op
return cast(IndexOpsLike, column_op(Column.__neg__)(operand))
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
from pyspark.pandas.base import column_op
return cast(IndexOpsLike, column_op(F.abs)(operand))
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__lt__)(left, right)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__le__)(left, right)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__ge__)(left, right)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
return column_op(Column.__gt__)(left, right)
class IntegralOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
LongType, IntegerType, ByteType and ShortType.
"""
@property
def pretty_name(self) -> str:
return "integrals"
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
raise TypeError("multiplication can not be applied to date times.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType):
return column_op(SF.repeat)(right, left)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def truediv(left: Column, right: Any) -> Column:
return F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
).otherwise(SF.lit(np.inf).__div__(left))
return numpy_column_op(truediv)(left, right)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def floordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(right is np.nan), np.nan).otherwise(
F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(SF.lit(np.inf).__div__(left))
)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rtruediv(left: Column, right: Any) -> Column:
return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
SF.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rfloordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
F.floor(SF.lit(right).__div__(left))
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class FractionalOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
FloatType, DoubleType.
"""
@property
def pretty_name(self) -> str:
return "fractions"
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
raise TypeError("multiplication can not be applied to date times.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def truediv(left: Column, right: Any) -> Column:
return F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
).otherwise(
F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
SF.lit(np.inf).__div__(left)
)
)
return numpy_column_op(truediv)(left, right)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def floordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(right is np.nan), np.nan).otherwise(
F.when(
SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(
F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
SF.lit(np.inf).__div__(left)
)
)
)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rtruediv(left: Column, right: Any) -> Column:
return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
SF.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rfloordiv(left: Column, right: Any) -> Column:
return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
F.when(SF.lit(left) == np.nan, np.nan).otherwise(
F.floor(SF.lit(right).__div__(left))
)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
SF.lit(True),
).otherwise(index_ops.spark.column.cast(spark_type))
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class DecimalOps(FractionalOps):
"""
The class for decimal operations of pandas-on-Spark objects with spark type:
DecimalType.
"""
@property
def pretty_name(self) -> str:
return "decimal"
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError(">= can not be applied to %s." % self.pretty_name)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("> can not be applied to %s." % self.pretty_name)
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class IntegralExtensionOps(IntegralOps):
"""
The class for binary operations of pandas-on-Spark objects with one of the
- spark types:
LongType, IntegerType, ByteType and ShortType
- dtypes:
Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
"""
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
class FractionalExtensionOps(FractionalOps):
"""
The class for binary operations of pandas-on-Spark objects with one of the
- spark types:
FloatType, DoubleType and DecimalType
- dtypes:
Float32Dtype, Float64Dtype
"""
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col.astype(self.dtype)
| apache-2.0 |
mxjl620/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
jdavidrcamacho/Tests_GP | 02 - Programs being tested/06 - spots tests/Test_M52_1spot.py | 1 | 8203 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 8 14:46:53 2017
@author: camacho
"""
import Kernel;reload(Kernel);kl=Kernel
import Kernel_likelihood;reload(Kernel_likelihood);lk=Kernel_likelihood
import Kernel_optimization;reload(Kernel_optimization);opt=Kernel_optimization
import RV_function;reload(RV_function);RVfunc=RV_function
import numpy as np;np.random.seed(1234)
import matplotlib.pylab as pl
import astropy.table as Table
import sys
f=open("Test_M52_1spot.txt","w")
sys.stdout = f
pl.close('all')
##### spots data pre-processing #####
rdb_data=Table.Table.read('1spot.rdb',format='ascii')
RV_spot=rdb_data['RV_tot'][1:101]
RV_spot=np.array(RV_spot)
RV_spot=RV_spot.astype('Float64')
RV_SPOT=np.concatenate((RV_spot,RV_spot,RV_spot,RV_spot),axis=0)
spots_yy=[]
for i in np.arange(4,401,4):
a=(RV_SPOT[i-4]+RV_SPOT[i-3]+RV_SPOT[i-2]+RV_SPOT[i-1])*1000/4.
spots_yy.append(a)
spots_data=[]
for j in np.arange(1,100,3.3):
spots_data.append(spots_yy[int(round(j))])
##### data and plot #####
# Period(P) ~ 20 to 50 days
# Observations(space) ~ every 4 days
# Error(yerr) ~ 0.20 to 0.50 m
# K=17.353 => planet with 1/4 mass of Jupiter
test1=RVfunc.RV_circular(P=25,K=17.353,T=0,gamma=0,time=100,space=30)
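# A sketch of the signal RV_circular presumably returns (RV_function is not shown here,
# so this assumes a zero-eccentricity Keplerian):
#   RV(t) = K*sin(2*pi*(t - T)/P) + gamma
# i.e. with K=17.353, P=25 and gamma=0 the injected planetary signal has a ~17 m/s
# semi-amplitude and a 25-day period.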
t=np.linspace(0,100,30) #np.linspace(0,time,space)
y0=np.array(test1[1])
yerr=np.array([np.random.uniform(0.2,0.5) for x in y0])
y=np.array([x1+x2 for x1,x2 in zip(y0,spots_data)])
total=np.array([x1+x2 for x1,x2 in zip(y,yerr)])
Xfinal=t
Yfinal=total
##### Lets try GP to fit #####
#kl.ExpSineSquared(theta,l,P) + kl.WhiteNoise(theta)
def sub_tests(trials=20,variation=-0.1):
theta=17.0;l=1.0;P=25.0
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.Matern_52(theta, l)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation
def subNoise_tests(trials=20,variation=-0.1):
theta=17.0;l=1.0;P=24.0;noise=0.5
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.Matern_52(theta, l)+kl.WhiteNoise(noise)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation;noise=noise+(variation/2.)
def add_tests(trials=20,variation=0.1):
theta=17.0;l=1.0;P=25.0
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.Matern_52(theta, l)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta+variation;l=l;P=P+variation
def addNoise_tests(trials=20,variation=0.1):
theta=17.0;l=1.0;P=24.0;noise=0.1
for i in range(1,trials+1):
print 'EXAMPLE %i' %i
kernel1=kl.Matern_52(theta, l)+kl.WhiteNoise(noise)
print 'initial kernel ->', kernel1
likelihood1=lk.likelihood(kernel1,Xfinal,Xfinal,Yfinal,yerr)
print 'initial likelihood ->', likelihood1
Xcalc=np.linspace(0,100,1000)
# [mu,std]=lk.compute_kernel(kernel1,Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
optimization1=opt.committed_optimization(kernel1,Xfinal,Xfinal,Yfinal,yerr,max_opt=10)
print 'final kernel ->',optimization1[1]
print 'final likelihood ->', optimization1[0]
# [mu,std]=lk.compute_kernel(optimization1[1],Xfinal,Xcalc,Yfinal,yerr)
# pl.figure() #Graphics
# pl.fill_between(Xcalc, mu+std, mu-std, color="k", alpha=0.1)
# pl.plot(Xcalc, mu+std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu-std, color="k", alpha=1, lw=0.25)
# pl.plot(Xcalc, mu, color="k", alpha=1, lw=0.5)
# pl.errorbar(Xfinal, Yfinal, yerr=yerr, fmt=".k", capsize=0)
# pl.xlabel("$x$")
# pl.ylabel("$y$")
theta=theta;l=l;noise=noise+(variation/2.)
sub_tests()
print ''
subNoise_tests()
print ''
add_tests()
print ''
addNoise_tests()
print ''
#for when everything ends
f.close() | mit |
UT-CWE/Hyospy | Hyospy_ensemble/lib/SUNTANS/Utils/timeseries.py | 1 | 43648 | # -*- coding: utf-8 -*-
"""
Collection of tools for plotting and analysis of time series data
Created on Wed Feb 06 12:37:17 2013
Author: Matt Rayson
Stanford University
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab
from matplotlib.lines import Line2D
from scipy import signal, interpolate
import othertime
from datetime import datetime, timedelta
from uspectra import uspectra, getTideFreq
import operator
import pdb
class timeseries(object):
"""
Class for handling time series data
Methods include:
- Power spectral density (plot)
- Filtering
- Interpolation
- Plotting
"""
basetime = datetime(1900,1,1)
VERBOSE=False
units=''
long_name=''
stationid = ''
varname = ''
Z=0.0
def __init__(self,t,y,**kwargs):
self.__dict__.update(kwargs)
self.t = t # independent variable (t,x, etc)
self.y = y # dependent variable
self.shape = self.y.shape
self.ndim = len(self.shape)
self.tsec = othertime.SecondsSince(self.t,basetime=self.basetime)
self.ny = np.size(self.y)
# make sure the original data is a masked array
mask = ~np.isfinite(self.y)
self.y = np.ma.MaskedArray(self.y,mask=mask)
self._checkDT()
self.Nt = self.t.shape[0]
# Make sure that time is the last dimension
if self.y.shape[-1] != self.Nt:
self.y=self.y.T
def psd(self, plot=True,nbandavg=1,**kwargs):
"""
Power spectral density
nbandavg = Number of fft bins to average
"""
if self.isequal==False and self.VERBOSE:
print 'Warning - time series is unequally spaced consider using lomb-scargle fft'
NFFT = int(2**(np.floor(np.log2(self.ny/nbandavg)))) # Nearest power of 2 to length of data
Pyy,frq = mlab.psd(self.y-self.y.mean(),Fs=2*np.pi/self.dt,NFFT=NFFT,window=mlab.window_hanning,scale_by_freq=True)
if plot:
plt.loglog(frq,Pyy,**kwargs)
plt.xlabel('Freq. [$cycles s^{-1}$]')
plt.ylabel('PSD')
plt.grid(b=1)
return Pyy, frq
def autocorr(self,normalize=False,axis=-1):
"""
Autocorrelation calculation
"""
assert self.isequal,\
'Data must be equally spaced to perform this function.'
N = self.ny
M = int(N)/10
ymean = self.y.mean(axis=axis)
y = self.y - ymean
k = range(1,M)
tau = np.asarray(k,dtype=np.float)*self.dt
Cyy = [1./(N-kk) * np.sum(y[...,0:-kk]*y[...,kk::],axis=axis) for kk in k ]
if normalize:
return Cyy/y.var(), tau
else:
return Cyy ,tau
def specgram(self, NFFT=256,noverlap=128,plot=True,vv=29, **kwargs):
"""
Spectrogram plot
"""
from matplotlib.colors import LogNorm
Pyy,frq,tmid = mlab.specgram(self.y-self.y.mean(),Fs=2*np.pi/self.dt,window=mlab.window_hanning,\
scale_by_freq=True,noverlap=noverlap,NFFT=NFFT)
if plot==True:
ax3=plt.gca()
plt.contourf(tmid*2*np.pi,frq,Pyy,vv,norm=LogNorm(),**kwargs)
ax3.set_yscale('log')
#ax3.set_xlim((1e-4,1e-2))
#ax3.set_ylim([tmid.min(),tmid.max()])
ax3.set_ylabel("$\\omega [rad.s^{-1}]$")
plt.colorbar()
#ax3.set_xticklabels([])
return Pyy,frq,tmid
def filt(self,cutoff_dt, btype='low',order=3,axis=-1):
"""
Butterworth filter the time series
Inputs:
cutoff_dt - cuttoff period [seconds]
btype - 'low' or 'high'
"""
if self.isequal==False and self.VERBOSE:
print 'Warning - time series is unequally spaced. Use self.interp to interpolate onto an equal grid'
if not btype == 'band':
Wn = self.dt/cutoff_dt
else:
Wn = [self.dt/co for co in cutoff_dt]
(b, a) = signal.butter(order, Wn, btype=btype, analog=0, output='ba')
# filtfilt only likes to operate along the last axis
ytmp = np.swapaxes(self.y,-1,axis)
ytmp = signal.filtfilt(b, a, ytmp)
return np.swapaxes(ytmp,axis,-1)
#return signal.filtfilt(b, a, self.y, axis=axis)
def godinfilt(self,filtwidths=[24,25]):
"""
Apply the successive Godin-type filter to the data set
The filter widths are defined by filtwidths (hours).
For a 24-25-24 hour filter set filtwidths=[24,25] (default).
"""
if self.isequal==False or self.dt != 3600.:
# Puts the data onto an hourly matrix
self._evenly_dist_data(3600.)
ymean = self.running_mean(windowlength=filtwidths[0]*3600.)
self.y = ymean
ymean = self.running_mean(windowlength=filtwidths[1]*3600.)
self.y = ymean
ymean = self.running_mean(windowlength=filtwidths[0]*3600.)
self.y = ymean
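# The three running means above implement the Godin filter: successive box-car averages of
# width 24 h, 25 h and 24 h applied to hourly data. The cascade places spectral nulls near
# the diurnal and semidiurnal bands, which is why it is commonly used to extract the
# subtidal (low-frequency) part of a tidal record.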
def interp(self,timein,method='linear',timeformat='%Y%m%d.%H%M%S',axis=-1):
"""
Interpolate the data onto an equally spaced vector
timein is either:
(3x1 tuple) - (tstart,tend,dt)
tstart and tend - string with format 'yyyymmdd.HHMMSS'
or
datetime vector
method - method passed to interp1d
"""
# Create the time vector
try:
tstart=timein[0]
tend=timein[1]
dt=timein[2]
tnew = othertime.TimeVector(tstart,tend,dt,timeformat=timeformat)
except:
tnew=timein
t = othertime.SecondsSince(tnew,basetime = self.basetime)
# Don't include nan points
if self.ndim > 1:
# Interpolate multidimensional arrays without a mask
F = interpolate.interp1d(self.tsec,self.y,kind=method,axis=axis,\
bounds_error=False,fill_value=0)
else:
#mask = np.isnan(self.y) == False
mask = ~self.y.mask
F = interpolate.interp1d(self.tsec[mask],self.y[mask],kind=method,axis=axis,\
bounds_error=False,fill_value=0)
#F = interpolate.UnivariateSpline(self.tsec,self.y,k=method)
return tnew, F(t)
def tidefit(self,frqnames=None,basetime=None,axis=-1):
"""
Perform a tidal harmonic fit to the data
Returns the amp, phase, frequencies and fitted time series
"""
# Get the tidal fruequencies
if frqnames == None:
# This returns the default frequencies from the uspectra class
frq,frqnames = getTideFreq(Fin=None)
else:
frq,frqnames = getTideFreq(Fin=frqnames)
# Call the uspectra method
U = uspectra(self.tsec,self.y,frq=frq,method='lsqfast')
amp,phs = U.phsamp(phsbase=basetime)
return amp, phs, frq, frqnames, U.invfft()
#amp, phs, mean = \
# harmonic_fit(self.tsec,self.y,frq,phsbase=basetime,axis=axis)
#
#return amp, phs, frq, frqnames,# U.invfft()
def running_harmonic(self,omega,windowlength=3*86400.0,overlap=12*3600.0, plot=True):
"""
Running harmonic fit of the time series at frequency, omega.
windowlength - length of each time window [seconds]
overlap - overlap between windows [seconds]
"""
# Make sure that omega is a list
try:
len(omega)
except:
omega=[omega]
pt1,pt2 = window_index_time(self.t,windowlength,overlap)
npt = len(pt1)
tmid = []
amp = np.zeros((npt,))
phs = np.zeros((npt,))
ymean = np.zeros((npt,))
ii=-1
for t1,t2 in zip(pt1,pt2):
ii+=1
# Perform the harmonic fit on the segment
U = uspectra(self.t[t1:t2],self.y[t1:t2],frq=omega,method='lsqfast')
# Reference the phase to the start of the time series
amp[ii],phs[ii] = U.phsamp(phsbase = self.t[0])
# Return the mid time point
ind = np.floor(t1 + (t2-t1)/2)
tmid.append(self.t[ind])
# Return the fitted time series
ymean[ii] = self.y[t1:t2].mean()
#yout[t1:t2] += ymean + U.invfft()
tmid = np.asarray(tmid)
if plot:
plt.subplot(211)
self.plot()
plt.plot(tmid,ymean,'r')
plt.fill_between(tmid,ymean-amp,y2=ymean+amp,color=[0.5,0.5,0.5],alpha=0.5)
plt.legend(('Original Signal','Harmonic reconstruction'))
ax=plt.subplot(212)
plt.fill_between(tmid,amp,alpha=0.5)
plt.xticks(rotation=17)
plt.ylabel('Amplitude')
ax.set_xlim([self.t[0],self.t[-1]])
return tmid, amp, phs
def running_mean(self,windowlength=3*86400.0):
"""
Running mean of the time series
windowlength - length of each time window [seconds]
"""
mask = self.y.mask.copy()
self.y[self.y.mask]=0.
self.y.mask=mask
windowsize = np.floor(windowlength/self.dt)
ytmp = self.y.copy()
ytmp = self._window_matrix(ytmp,windowsize)
weights = 1./windowsize * np.ones((windowsize,))
ytmp2 = np.sum(ytmp*weights,axis=-1)
# This result needs to be normalized to account for missing data,
# this is the same as calculating different weights for each section
ntmp= np.ones_like(self.y)
ntmp[mask] = 0.
norm = self._window_matrix(ntmp,windowsize)
#norm*= weights
norm = norm.sum(axis=-1)
norm /= windowsize
ytmp2/=norm
return self._update_windowed_data(ytmp2,windowsize)
def running_rms(self,windowlength=3*86400.0):
"""
Running RMS of the time series
windowlength - length of each time window [seconds]
"""
mask = self.y.mask.copy()
self.y[self.y.mask]=0.
self.y.mask=mask
windowsize = np.floor(windowlength/self.dt)
ytmp = self.y.copy()
ytmp = self._window_matrix(ytmp,windowsize)
ytmp2 = np.sum(ytmp*ytmp,axis=-1)
ntmp= np.ones_like(self.y)
ntmp[mask] = 0.
N = self._window_matrix(ntmp,windowsize)
N = N.sum(axis=-1)
return self._update_windowed_data(np.sqrt( 1./N * ytmp2),windowsize)
def despike(self,nstd=4.,windowlength=3*86400.0,overlap=12*3600.0,\
upper=np.inf,lower=-np.inf,maxdiff=np.inf,fillval=0.):
"""
Despike the time series by masking any values that fall more than nstd standard
deviations from a running-window mean.
nstd - number of standard deviations outside to replace
windowlength - length of each time window [seconds]
overlap - overlap between windows [seconds]
lower - lower value bound
upper - upper value bound
maxdiff - maximum difference between two points
"""
# if self.isequal==False:
# self._evenly_dist_data()
nbad = 0
# Now check the maximum difference
ydiff = np.zeros_like(self.y)
ydiff[1::] = np.abs(self.y[1::]-self.y[0:-1])
ind = ydiff>=maxdiff
#self.y[ind]=fillval
self.y.mask[ind] = True # Mask needs to be set after values are prescribed
nbad += np.sum(ind)
# First mask any NaN and values outside of bounds
ind = operator.or_(self.y<=lower,self.y>=upper)
#self.y[ind]=fillval
self.y.mask[ind] = True
nbad += np.sum(ind)
ind = np.isnan(self.y)
#self.y[ind]=fillval
self.y.mask[ind] = True
nbad += np.sum(ind)
# Now calculate the moving median and standard deviation
windowsize = np.floor(windowlength/self.dt)
ytmp = self.y.copy()
ytmp = self._window_matrix(ytmp,windowsize)
ytmp2 = np.mean(ytmp,axis=-1)
ymean = self._update_windowed_data(ytmp2,windowsize)
#ytmp2= np.std(ytmp,axis=-1)
ytmp2 = np.apply_along_axis(np.std,-1,ytmp)
ystd = self._update_windowed_data(ytmp2,windowsize)
# Mask values outsize of the
ind = operator.or_(self.y >= ymean + nstd*ystd,\
self.y <= ymean - nstd*ystd)
#self.y[ind] = ymedian[ind]
self.y.mask[ind] = True
nbad += np.sum(ind)
if self.VERBOSE:
print 'Despiked %d points'%nbad
def plot(self,angle=17,**kwargs):
"""
Plot
Rotates date lables
"""
h1=plt.plot(self.t,self.y,**kwargs)
plt.xticks(rotation=angle)
return h1
def fillplot(self,angle=17,alpha=0.7,**kwargs):
"""
"""
h1=plt.fill_between(self.t,self.y,alpha=alpha,**kwargs)
plt.xticks(rotation=angle)
return h1
def subset(self,time1,time2):
"""
Returns a subset of the array between time1 and time2
"""
t0 = othertime.findNearest(time1,self.t)
t1 = othertime.findNearest(time2,self.t)
return timeseries(self.t[t0:t1],self.y[...,t0:t1])
def savetxt(self,txtfile):
f = open(txtfile,'w')
#for t,v in zip(self.tsec,self.y):
# f.write('%10.6f\t%10.6f\n'%(t,v))
for ii in range(self.y.shape[0]):
f.write('%s, %10.6f\n'%(datetime.strftime(self.t[ii],'%Y-%m-%d %H:%M:%S'),self.y[ii]))
f.close()
def copy(self):
"""
Make a copy of the time-series object in memory
"""
from copy import deepcopy
return deepcopy(self)
def _checkDT(self):
"""
Check that the time series is equally spaced
"""
dt = np.diff(self.tsec)
dt_unique = np.unique(dt)
if np.size(dt_unique) == 1:
self.isequal = True
else:
self.isequal = False
try:
self.dt = dt[1]
except:
self.dt = 0.0
def _evenly_dist_data(self,dt):
"""
Distribute the data onto an evenly spaced array
No interpolation is performed
"""
if self.VERBOSE:
print 'inserting the data into an equally-spaced time vector (dt = %f s).'%self.dt
t0 = self.tsec[0]
t = self.tsec - t0
# Put the data onto an evenly spaced, masked array
tout = np.arange(t[0],t[-1]+dt,dt)
tind = np.searchsorted(tout,t)
shape = self.y.shape[:-1] + tout.shape
yout = np.ma.MaskedArray(np.zeros(shape),mask=True)
yout[...,tind] = self.y
def updatetime(tsec):
return timedelta(seconds=tsec) + self.t[0]
self.t = np.array(map(updatetime,tout))
self.y = yout
self.tsec = tout+t0
self.ny = np.size(self.y)
self.isequal = True
self.dt = dt
def _window_matrix(self,y,windowsize):
"""
Returns the matrix as a strided array so that 'rolling' operations can
be performed along the last axis
"""
windowsize=int(windowsize)
shape = y.shape[:-1] + (y.shape[-1] - windowsize + 1, windowsize)
strides = y.strides + (y.strides[-1],)
# The masked values get
return np.lib.stride_tricks.as_strided(y, shape=shape, strides=strides)
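# Illustration of the strided view returned above: for a 1-D series of length 100 and
# windowsize 5 the result has shape (96, 5), with row i equal to y[i:i+5]. No data are
# copied, so rolling statistics reduce to a single operation along the last axis.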
def _update_windowed_data(self,ytmp,windowsize):
"""
Re-inserts data that has been windowed into an array
that is the same size as the original time series
"""
y = np.zeros_like(self.y)
indent = (windowsize-np.mod(windowsize,2))/2
if np.mod(windowsize,2)==1:
y[...,indent:-indent]=ytmp
else:
y[...,indent-1:-indent]=ytmp
y = np.ma.MaskedArray(y,mask=self.y.mask)
y.mask[...,0:indent]=True
y.mask[...,-indent:]=True
return y
class ModVsObs(object):
"""
Class for handling and comparing two time series i.e. model vs observations
"""
units=' '
long_name=' '
stationid = ' '
varname = ' '
Z=0.0
def __init__(self,tmod,ymod,tobs,yobs,**kwargs):
"""
Inputs:
tmod,tobs - vector of datetime object
ymod,yobs - vector of values
Keywords:
long_name: string containing variable's name (used for plotting)
units: string containing variable's units (used for plotting)
Note that tmod and tobs don't need to be the same length. yobs is
linearly interpolated onto tmod.
"""
self.__dict__.update(kwargs)
# Set the range inclusive of both observation and model result
time0 = max(tmod[0],tobs[0])
time1 = min(tmod[-1],tobs[-1])
if time1 < time0:
print 'Error - the two datasets have no overlapping period.'
return None
# Clip both the model and observation to this daterange
t0 = othertime.findNearest(time0,tmod)
t1 = othertime.findNearest(time1,tmod)
TSmod = timeseries(tmod[t0:t1],ymod[...,t0:t1])
t0 = othertime.findNearest(time0,tobs)
t1 = othertime.findNearest(time1,tobs)
self.TSobs = timeseries(tobs[t0:t1],yobs[...,t0:t1])
# Interpolate the observed value onto the model step
#tobs_i, yobs_i = TSobs.interp(tmod[t0:t1],axis=0)
#self.TSobs = timeseries(tobs_i, yobs_i)
# Interpolate the modeled value onto the observation time step
tmod_i, ymod_i = TSmod.interp(tobs[t0:t1],axis=-1)
self.TSmod = timeseries(tmod_i,ymod_i)
self.N = self.TSmod.t.shape[0]
if self.N==0:
print 'Error - zero model points detected'
return None
self.calcStats()
def plot(self,colormod='r',colorobs='b',legend=True,loc='lower right',**kwargs):
"""
Time-series plots of both data sets with labels
"""
ax = plt.gca()
h1 = self.TSmod.plot(color=colormod,**kwargs)
h2 = plt.plot(self.TSobs.t,self.TSobs.y,color=colorobs,**kwargs)
plt.ylabel(r'%s [$%s$]'%(self.long_name,self.units)) # Latex formatting
plt.grid(b=True)
plt.title('StationID: %s'%self.stationid)
if legend:
plt.legend(('Model','Observed'),loc=loc)
return h1, h2, ax
def stackplot(self,colormod='r',colorobs='b',scale=None,ax=None,fig=None,labels=True,**kwargs):
"""
Stack plot of several time series
"""
if labels:
labels = ['z = %1.1f m'%z for z in self.Z.tolist()]
else:
labels=None
fig,ax,ll = stackplot(self.TSobs.t,self.TSobs.y,ax=ax,fig=fig,\
scale=scale,units=self.units,labels=labels,color=colorobs,**kwargs)
fig,ax,ll = stackplot(self.TSmod.t,self.TSmod.y,ax=ax,fig=fig,\
scale=scale,units=self.units,labels=labels,color=colormod,**kwargs)
def scatter(self,ylims=None,printstats=True,**kwargs):
"""
Scatter plot of the model vs observation
"""
if ylims==None:
ylims = [self.TSobs.y.min(), self.TSobs.y.max()]
h1 = plt.plot(self.TSobs.y.ravel(),self.TSmod.y.ravel(),'.',**kwargs)
plt.plot([ylims[0],ylims[1]],[ylims[0],ylims[1]],'k--')
ax = plt.gca()
ax.set_aspect('equal')
plt.xlim(ylims)
plt.ylim(ylims)
ax.autoscale(tight=True)
plt.grid(b=True)
if printstats:
textstr = '$r^2$ = %6.2f\nRMSE = %6.2f\n'%(self.cc,self.rmse)
plt.text(0.05,0.65,textstr,transform=ax.transAxes)
return h1, ax
def calcStats(self):
"""
Calculates statistics including:
moments, RMS, CC, skill, ...
"""
self.meanObs = self.TSobs.y.mean(axis=-1)
self.meanMod = self.TSmod.y.mean(axis=-1)
self.stdObs = self.TSobs.y.std(axis=-1)
self.stdMod = self.TSmod.y.std(axis=-1)
self.rmsObs = rms(self.TSobs.y,axis=-1)
self.rmsMod = rms(self.TSmod.y,axis=-1)
# RMSE
self.rmse = rms(self.TSobs.y-self.TSmod.y,axis=-1)
# skill
self.skill = 1.0 - ((self.TSobs.y-self.TSmod.y)**2.).sum(axis=-1) / \
( (self.TSobs.y.T - self.meanObs)**2.).T.sum(axis=-1)
# Correlation coefficient
self.cc = 1.0/float(self.N) * ( (self.TSobs.y.T-self.meanObs).T * \
(self.TSmod.y.T - self.meanMod).T ).sum(axis=-1) / (self.stdObs * self.stdMod)
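# For reference, the statistics computed above are (o = observed, m = modelled,
# o_mean/m_mean = time means):
#   rmse  = sqrt( mean( (o - m)**2 ) )
#   skill = 1 - sum( (o - m)**2 ) / sum( (o - o_mean)**2 )   (skill relative to the observed mean)
#   cc    = mean( (o - o_mean)*(m - m_mean) ) / ( std(o)*std(m) )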
def printStats(self,f=None,header=True):
"""
Prints the statistics to a markdown language style table
"""
if not self.__dict__.has_key('meanMod'):
self.calcStats()
outstr=''
if header:
outstr += "| | Mean Model | Mean Obs. | Std. Mod. | Std Obs | RMSE | CC | skill |\n"
outstr += "|------| ---------- | --------- | --------- | ------- | --- | ----- | ------| \n"
outstr += "| %s [%s] | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | \n"%\
(self.stationid,self.units, self.meanMod, self.meanObs, self.stdMod, self.stdObs,\
self.rmse,self.cc,self.skill)
if f == None:
print outstr
else:
f.write(outstr)
def printStatsZ(self,f=None,header=True):
"""
Prints the statistics to a markdown language style table
"""
outstr=''
if header:
outstr += "| Depth | Mean Model | Mean Obs. | Std. Mod. | Std Obs | RMSE | CC | skill |\n"
outstr += "|------| ---------- | --------- | --------- | ------- | --- | ----- | ------| \n"
for ii,zz in enumerate(self.Z.tolist()):
outstr += "| %3.1f [m] | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | \n"%\
(zz, self.meanMod[ii], self.meanObs[ii], self.stdMod[ii],\
self.stdObs[ii], self.rmse[ii],self.cc[ii],self.skill[ii])
if f == None:
print outstr
else:
f.write(outstr)
def crosscorr(self,normalize=False,axis=-1):
"""
Crosscorrelation calculation
"""
assert self.TSobs.isequal,\
'Data must be equally spaced to perform this function.'
N = self.TSobs.ny
M = int(N)/10
ymean = self.TSobs.y.mean(axis=axis)
y = self.TSobs.y - ymean
xmean = self.TSmod.y.mean(axis=axis)
x = self.TSmod.y - xmean
k = range(1,M)
tau = np.asarray(k,dtype=np.float)*self.TSobs.dt
Cxy = [1./(N-kk) * np.sum(y[...,0:-kk]*x[...,kk::],axis=axis) for kk in k ]
if normalize:
return Cxy/(y.std()*x.std()), tau
else:
return Cxy ,tau
def csd(self, plot=True,nbandavg=1,**kwargs):
"""
Cross spectral density
nbandavg = Number of fft bins to average
"""
if self.TSobs.isequal==False and self.TSobs.VERBOSE:
print 'Warning - time series is unequally spaced consider using lomb-scargle fft'
NFFT = int(2**(np.floor(np.log2(self.TSobs.ny/nbandavg)))) # Nearest power of 2 to length of data
Pyy,frq = mlab.csd(self.TSobs.y-self.TSobs.y.mean(),self.TSmod.y-self.TSmod.y.mean(),Fs=2*np.pi/self.TSobs.dt,NFFT=NFFT,window=mlab.window_hanning,scale_by_freq=True)
if plot:
plt.loglog(frq,Pyy,**kwargs)
plt.xlabel('Freq. [$cycles s^{-1}$]')
plt.ylabel('PSD')
plt.grid(b=1)
return Pyy, frq
def harmonic_fit(t,X,frq,mask=None,axis=0,phsbase=None):
"""
Least-squares harmonic fit on an array, X, with frequencies, frq.
X - vector [Nt] or array [Nt, (size)]
t - vector [Nt]
frq - vector [Ncon]
mask - array [(size non-time X)]
phsbase - phase offset
where, dimension with Nt should correspond to axis = axis.
"""
t = np.asarray(t)
# Reshape the array sizes
X = X.swapaxes(0, axis)
sz = X.shape
lenX = np.prod(sz[1:])
if not len(t) == sz[0]:
raise ValueError('length of t (%d) must equal dimension of X (%s)'%(len(t),sz[0]))
X = np.reshape(X,(sz[0],lenX))
if not mask == None:
mask = np.reshape(mask,(lenX,))
else:
mask = np.ones((lenX,))
frq = np.array(frq)
Nfrq = frq.shape[0]
def buildA(t,frq):
"""
Construct matrix A
"""
nt=t.shape[0]
nf=frq.shape[0]
nff=nf*2+1
A=np.ones((nt,nff))
for ff in range(0,nf):
A[:,ff*2+1]=np.cos(frq[ff]*t)
A[:,ff*2+2]=np.sin(frq[ff]*t)
return A
def lstsqnumpy(A,y):
"""
Solve the least square problem
Return:
the complex amplitude
the mean
"""
N=A.shape[1]
b = np.linalg.lstsq(A,y)
A = b[0][1::2]
B = b[0][2::2]
return A+1j*B, b[0][0::N]
def phsamp(C):
return np.abs(C), np.angle(C)
# Least-squares matrix approach
A = buildA(t,frq)
C, C0 = lstsqnumpy(A,X) # This works on all columns of X!!
Amp, Phs= phsamp(C)
# Reference the phase to some time
if not phsbase == None:
base = othertime.SecondsSince(phsbase)
phsoff = phase_offset(frq,t[0],base)
phsoff = np.repeat(phsoff.reshape((phsoff.shape[0],1)),lenX,axis=1)
Phs = np.mod(Phs+phsoff,2*np.pi)
# Non-vectorized method (~20x slower)
# Amp = np.zeros((Nfrq,lenX))
# Phs = np.zeros((Nfrq,lenX))
# for ii in range(0,lenX):
# if mask[ii]==True:
# C = lstsqnumpy(A,X[:,ii])
# # Calculate the phase and amplitude
# am, ph= phsamp(C)
# Amp[:,ii] = am; Phs[:,ii] = ph
# reshape the array
Amp = np.reshape(Amp,(Nfrq,)+sz[1:])
Phs = np.reshape(Phs,(Nfrq,)+sz[1:])
C0 = np.reshape(C0,sz[1:])
# Output back along the original axis
return Amp.swapaxes(axis,0), Phs.swapaxes(axis,0), C0.swapaxes(axis,0)
def phase_offset(frq,start,base):
"""
Compute a phase offset for a given fruequency
"""
if type(start)==datetime:
dx = start - base
dx = dx.total_seconds()
else:
dx = start -base
return np.mod(dx*np.array(frq),2*np.pi)
def loadDBstation(dbfile,stationID,varname,timeinfo=None,filttype=None,cutoff=3600.0,output_meta=False):
"""
Load station data from a database file
Inputs:
dbfile - location of database file
stationID - Station ID in database
varname - variable name e.g. 'waterlevel', 'discharge', 'salinity'
timeinfo (optional) - tuple with (starttime,endtime,dt). Format 'yyyymmdd.HHMMSS'
Use this to interpolate onto a constant time vector
filttype (optional) - 'low' or 'high'
Set this to filter data
Returns:
timeseries object
-1 on error
"""
from netcdfio import queryNC
outvar = ['NetCDF_Filename','NetCDF_GroupID','StationName']
tablename = 'observations'
#condition = 'Variable_Name = "%s" and StationID = "%s"' % (varname,stationID)
condition = 'Variable_Name = "%s" and StationID LIKE "%%%s"' % (varname,stationID)
print 'Querying database...'
print condition
data, query = queryNC(dbfile,outvar,tablename,condition)
if len(data)==0:
print '!!! Warning - Did not find any stations matching query. Returning -1 !!!'
return -1
else:
yout = data[0][varname].squeeze()
# Zero nan
yout[np.isnan(yout)] = 0.0
ts = timeseries(data[0]['time'],yout)
if not timeinfo==None:
print 'Interpolating station data between %s and %s\n'%(timeinfo[0],timeinfo[1])
tnew,ynew = ts.interp((timeinfo[0],timeinfo[1],timeinfo[2]))
ts = timeseries(tnew,ynew)
ts.dt = timeinfo[2] # This needs updating
if not filttype==None:
print '%s-pass filtering output data. Cutoff period = %f [s].'%(filttype,cutoff)
yfilt = ts.filt(cutoff,btype=filttype,axis=-1)
ts.y = yfilt.copy()
if output_meta:
if data[0].has_key('elevation'):
ele = data[0]['elevation']
else:
ele = np.array([0.0])
meta = {'longitude':data[0]['longitude'],'latitude':data[0]['latitude'],'elevation':ele,'StationName':query['StationName'][0]}
return ts, meta
else:
return ts
def stackplot(t,y,scale=None,gap=0.2,ax=None,fig=None,units='',labels=None,**kwargs):
"""
Vertically stacked time series plot.
Puts all of the time-series into one axes by working out a suitable spacing.
Inputs:
y - 2d array [nt,ny] where ny is the number of time series
t - datetime vector
Returns:
fig, ax : figure and axes handles
ll : plot handles to each line plot [list]
"""
# Determine the scale factors and the heights of all of the axes
ny = y.shape[0]
if scale==None:
scale = np.abs(y).max()
if not labels == None:
assert len(labels)==ny, ' number of labels (%d) must equal number of layers (%d)'%(len(labels),ny)
# Height of each axes in normalized coordinates
yheight = 1.0 / (ny + (ny+1.0)*gap)
# Create a new figure
if fig==None:
fig=plt.figure()
else:
fig = plt.gcf()
if ax == None:
ax = fig.add_subplot(111,frame_on=False,ylim=[0,1.0],yticks=[])
# Now add each line to the figure
ll = [] # List of line objects
def fakeaxes(yval,dy):
cc=[0.5,0.5,0.5]
ax.add_line(Line2D([0,1],[yval,yval],linewidth=0.5,color=cc,transform=ax.transAxes,linestyle='--'))
yp = yval + dy/2.
ym = yval - dy/2.
ax.add_line(Line2D([0,0],[yp,ym],linewidth=0.5,color=cc,transform=ax.transAxes))
ax.add_line(Line2D([1,1],[yp,ym],linewidth=0.5,color=cc,transform=ax.transAxes))
#Little caps
ax.add_line(Line2D([0,0.01],[yp,yp],linewidth=0.5,color=cc,transform=ax.transAxes))
ax.add_line(Line2D([0,0.01],[ym,ym],linewidth=0.5,color=cc,transform=ax.transAxes))
ax.add_line(Line2D([0.99,1],[yp,yp],linewidth=0.5,color=cc,transform=ax.transAxes))
ax.add_line(Line2D([0.99,1],[ym,ym],linewidth=0.5,color=cc,transform=ax.transAxes))
for N in range(1,ny+1):
yoffset = N*(gap*yheight) + 0.5*yheight + (N-1)*yheight
# scaling factor
#vscale = yheight / (scale+yoffset)
vscale = yheight / (scale)
l = ax.plot(t,vscale*y[N-1,:]+yoffset,**kwargs)
ll.append(l)
#Adds an axes
fakeaxes(yoffset,yheight)
if not labels==None:
plt.text(0.6,yoffset+0.5*yheight-0.02,labels[N-1],transform=ax.transAxes,fontstyle='italic')
# Add a few extra features
ax.add_line(Line2D([0,1],[0.01,0.01],linewidth=0.5,color='k',transform=ax.transAxes))
ax.add_line(Line2D([0,1],[1,1],linewidth=0.5,color='k',transform=ax.transAxes))
plt.xticks(rotation=17)
plt.ylabel('$Scale\ =\ %2.1f\ %s$'%(scale,units))
return fig,ax,ll
def SpeedDirPlot(t,u,v,convention='current',units='m s^{-1}',color1='b',color2='r'):
"""
Plots speed and direction on the same axes
Inputs:
t - time vector
u,v - velocity cartesian components
Returns:
ax - list of axes handles
h - list of plot handles
convention = 'current' or 'wind'
See this example:
http://matplotlib.org/examples/api/two_scales.html
"""
import airsea
Dir, Spd = airsea.convertUV2SpeedDirn(u,v,convention=convention)
ax = range(2)
h = range(2)
fig = plt.gcf()
ax[0] = fig.gca()
# Left axes
h[0] = ax[0].fill_between(t, Spd, color=color1,alpha=0.7)
# Make the y-axis label and tick labels match the line color.
ax[0].set_ylabel('Speed [$%s$]'%units, color=color1)
for tl in ax[0].get_yticklabels():
tl.set_color(color1)
#Right axes
ax[1] = ax[0].twinx() # This sets up the second axes
ax[1].plot(t, Dir, '.',color=color2)
ax[1].set_ylabel("Dir'n [$\circ$]", color=color2)
ax[1].set_ylim([0,360])
ax[1].set_yticks([0,90,180,270])
ax[1].set_yticklabels(['N','E','S','W'])
for tl in ax[1].get_yticklabels():
tl.set_color(color2)
plt.setp( ax[0].xaxis.get_majorticklabels(), rotation=17 )
return ax, h
def ProfilePlot(t,y,z,scale=86400,\
axis=0,color=[0.5,0.5,0.5],xlim=None,units='m/s',scalebar=1.0):
"""
Plot a series of vertical profiles as a time series
scale - Sets 1 unit = scale (seconds)
See this page on formatting:
http://matplotlib.org/examples/pylab_examples/date_index_formatter.html
"""
from matplotlib import collections
from matplotlib.ticker import Formatter
class MyFormatter(Formatter):
def __init__(self, dates, fmt='%b %d %Y'):
self.fmt = fmt
self.dates = dates
def __call__(self, x, pos=0):
            'Return the label for time x (seconds since 1990-1-1)'
return datetime.strftime(datetime(1990,1,1)+timedelta(seconds=x),self.fmt)
tsec = othertime.SecondsSince(t)
formatter = MyFormatter(tsec)
y = np.swapaxes(y,0,axis)
lines=[]
line2 =[]
for ii, tt in enumerate(tsec):
#xplot = set_scale(y[:,ii],tt)
xplot = tt + y[:,ii]*scale
lines.append(np.array((xplot,z)).T)
line2.append(np.array([[tt,tt],[z[0],z[-1]]]).T)
LC1 = collections.LineCollection(lines,colors=color,linewidths=1.5)
LC2 = collections.LineCollection(line2,colors='k',linestyles='dashed') # Zero axis
ax=plt.gca()
ax.add_collection(LC1)
ax.add_collection(LC2)
ax.set_ylim((z.min(),z.max()))
ax.xaxis.set_major_formatter(formatter)
    if xlim is None:
xlim=(tsec[0]-scale/2,tsec[-1]+scale/2)
else:
xlim=othertime.SecondsSince(xlim)
ax.set_xlim(xlim)
plt.xticks(rotation=17)
###
# Add a scale bar
###
# Compute the scale bar size in dimensionless units
    if scalebar is not None:
xscale = scalebar*scale/(xlim[-1]-xlim[0])
x0 = 0.1
y0 = 0.8
dy = 0.02
ax.add_line(Line2D([x0,x0+xscale],[y0,y0],linewidth=0.5,color='k',transform=ax.transAxes))
#Little caps
ax.add_line(Line2D([x0,x0],[y0-dy,y0+dy],linewidth=0.5,color='k',transform=ax.transAxes))
ax.add_line(Line2D([x0+xscale,x0+xscale],[y0-dy,y0+dy],linewidth=0.5,color='k',transform=ax.transAxes))
plt.text(x0,y0+0.05,'Scale %3.1f %s'%(scalebar,units),\
transform=ax.transAxes)
return ax
def monthlyhist(t,y,ylim=0.1,xlabel='',ylabel='',title='',**kwargs):
"""
Plots 12 histograms on a 6x2 matrix of variable, y, grouped by calendar month
Inputs:
y - vector of data
t - vector of datetime objects
kwargs - keyword arguments for numpy.hist
"""
month = othertime.getMonth(t)
fig=plt.gcf()
for m in range(1,13):
# Find the values
ind = np.argwhere(month==m)
data=y[ind]
ax=plt.subplot(6,2,m)
if len(data)>0:
plt.hist(data,**kwargs)
mon=datetime.strftime(datetime(1900,m,1),'%B')
plt.title(mon)
plt.ylim([0,ylim])
if m not in (11,12):
ax.set_xticklabels([])
else:
plt.xlabel(xlabel)
if m not in (1,3,5,7,9,11):
ax.set_yticklabels([])
else:
plt.ylabel(ylabel)
#Calc some stats
textstr = 'Mean: %6.1f\nStd. Dev.: %6.1f\n'%(np.mean(data),np.std(data))
plt.text(0.5,0.5,textstr,transform=ax.transAxes)
# plot a title
plt.figtext(0.5,0.95,title,fontsize=14,horizontalalignment='center')
return fig
def window_index(serieslength,windowsize,overlap):
"""
Determines the indices for start and end points of a time series window
Inputs:
serieslength - length of the vector [int]
windowsize - length of the window [int]
overlap - number of overlap points [int]
Returns: pt1,pt2 the start and end indices of each window
"""
p1=0
p2=p1 + windowsize
pt1=[p1]
pt2=[p2]
while p2 < serieslength:
p1 = p2 - overlap
p2 = min((p1 + windowsize, serieslength))
pt1.append(p1)
pt2.append(p2)
return pt1, pt2
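# Illustrative sketch (added note, not part of the original module): a
# length-10 series with 4-point windows overlapping by 2 points gives
# pt1 = [0, 2, 4, 6] and pt2 = [4, 6, 8, 10].
def _window_index_example():
    pt1, pt2 = window_index(10, 4, 2)
    assert pt1 == [0, 2, 4, 6]
    assert pt2 == [4, 6, 8, 10]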
def window_index_time_slow(t,windowsize,overlap):
"""
Determines the indices for window start and end points of a time vector
The window does not need to be evenly spaced
Inputs:
t - list or array of datetime objects
windowsize - length of the window [seconds]
overlap - number of overlap points [seconds]
Returns: pt1,pt2 the start and end indices of each window
"""
try:
t=t.tolist()
except:
t=t
t1=t[0]
t2=t1 + timedelta(seconds=windowsize)
pt1=[0]
pt2=[othertime.findNearest(t2,t)]
while t2 < t[-1]:
t1 = t2 - timedelta(seconds=overlap)
t2 = t1 + timedelta(seconds=windowsize)
pt1.append(othertime.findNearest(t1,t))
pt2.append(othertime.findNearest(t2,t))
return pt1, pt2
def window_index_time(t,windowsize,overlap):
"""
Determines the indices for window start and end points of a time vector
The window does not need to be evenly spaced
Inputs:
t - list or array of datetime objects
windowsize - length of the window [seconds]
overlap - number of overlap points [seconds]
Returns: pt1,pt2 the start and end indices of each window
"""
tsec = othertime.SecondsSince(t)
t1=tsec[0]
t2=t1 + windowsize
pt1=[0]
pt2=[np.searchsorted(tsec,t2)]
while t2 < tsec[-1]:
t1 = t2 - overlap
t2 = t1 + windowsize
pt1.append(np.searchsorted(tsec,t1))
pt2.append(np.searchsorted(tsec,t2))
return pt1, pt2
def pol2cart(th,rho):
"""Convert polar coordinates to cartesian"""
x = rho * np.cos(th)
y = rho * np.sin(th)
return x, y
def cart2pol(x,y):
"""
Convert cartesian to polar coordinates
"""
th = np.angle(x+1j*y)
rho = np.abs(x+1j*y)
return th, rho
def ap2ep(uamp,uphs,vamp,vphs):
"""
Convert u/v amplitude phase information to tidal ellipses
All angles are in radians
Returns:
SEMA, SEMI, INC, PHS, ECC
Based on the MATLAB ap2ep function:
https://www.mathworks.com/matlabcentral/fileexchange/347-tidalellipse/content/ap2ep.m
"""
# Make complex amplitudes for u and v
u = uamp*np.exp(-1j*uphs)
v = vamp*np.exp(-1j*vphs)
#Calculate complex radius of anticlockwise and clockwise circles:
wp = (u+1j*v)/2.0 # for anticlockwise circles
wm = np.conj(u-1j*v)/2.0 # for clockwise circles
# and their amplitudes and angles
Wp = np.abs(wp)
Wm = np.abs(wm)
THETAp = np.angle(wp)
THETAm = np.angle(wm)
# calculate ep-parameters (ellipse parameters)
SEMA = Wp+Wm # Semi Major Axis, or maximum speed
    SEMI = Wp-Wm # Semi-Minor Axis, or minimum speed
ECC = SEMI/SEMA # Eccentricity
PHA = (THETAm-THETAp)/2.0 # Phase angle, the time (in angle) when
# the velocity reaches the maximum
INC = (THETAm+THETAp)/2.0 # Inclination, the angle between the
# semi major axis and x-axis (or u-axis).
return SEMA, SEMI, INC, PHA, ECC
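# Quick self-check sketch (added note, not part of the original module): a
# purely anticlockwise rotary current u = cos(wt), v = sin(wt) has unit
# amplitudes with v lagging u by 90 degrees, and should come out circular
# (SEMA == SEMI, ECC == 1).
def _ap2ep_circular_check():
    sema, semi, inc, pha, ecc = ap2ep(1.0, 0.0, 1.0, np.pi / 2)
    assert np.allclose([sema, semi, ecc], [1.0, 1.0, 1.0])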
def rms(x,axis=None):
"""
root mean squared
"""
return np.sqrt(1.0/np.shape(x)[-1] * np.sum(x**2,axis=axis))
def crms(t,y):
"""
    Continuous function rms
"""
fac = 1.0/(t[-1]-t[0])
return np.sqrt(fac*np.trapz(y**2,x=t))
def tidalrmse(Ao,Am,Go,Gm):
"""
Tidal harmonic RMSE
Ao, Am - observed and modeled amplitude
Go, Gm - observed and modeled phase (radians)
"""
return np.sqrt( 0.5*(Ao**2 + Am**2) - Ao*Am*np.cos(Go-Gm) )
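# Worked check (added note, not part of the original module): when the phases
# agree (Go == Gm) the expression reduces to |Ao - Am| / sqrt(2), e.g.
# tidalrmse(1.0, 0.6, 0.0, 0.0) = sqrt(0.08) ~= 0.283.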
def loadtxt(txtfile):
"""
Loads a text file with two columns
Column 1 is time: seconds since 1990-1-1
Column 2 is the data.
"""
f = open(txtfile,'r')
t=[]
y=[]
for line in f.readlines():
line = line.strip()
ll = line.split(',')
t.append(datetime.strptime(ll[0],'%Y-%m-%d %H:%M:%S'))
y.append(float(ll[1]))
f.close()
return timeseries(np.array(t),np.array(y))
| mit |
paulmartel/voltdb | tools/vis3.py | 7 | 10384 | #!/usr/bin/env python
# This is a visualizer which pulls app benchmark results from the performance
# stats database and visualizes them. For each ordered pair of branches it
# generates a throughput comparison scatter plot and an HTML summary table,
# plus an index page that links them together.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
from operator import itemgetter, attrgetter
import numpy
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
STATS_SERVER = 'volt2'
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
#COLORS = plt.cm.Spectral(numpy.linspace(0, 1, 10)).tolist()
COLORS = ['b','g','r','c','m','y','k']
#print COLORS
MARKERS = ['o', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', '+', 'p']
mc = {}
def get_branches(hostname, port, days):
mydate = datetime.datetime.today()-datetime.timedelta(days=days)
query = "select branch, max(date), count(*) from app_stats where date >= '%s' group by branch order by 3 desc" % \
mydate.strftime('%Y-%m-%d 00:00:00')
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, '@AdHoc',
[FastSerializer.VOLTTYPE_STRING])
resp = proc.call([query])
conn.close()
branches = []
keys=['branch','sampledate','count']
for row in resp.tables[0].tuples:
branches.append(dict(zip(keys,row)))
return branches
def get_stats(hostname, port, days):
"""Get most recent run statistics of all apps within the last 'days'
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'BestOfPeriod_mr',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
stats = dict()
run_stat_keys = ['app', 'branch', 'nodes', 'date', 'tps', 'lat95', 'lat99', 'count']
for row in resp.tables[0].tuples:
group = (row[1],row[0],row[2])
app_stats = []
if group not in stats:
stats[group] = app_stats
else:
app_stats = stats[group]
run_stats = dict(zip(run_stat_keys, row))
app_stats.append(run_stats)
return stats
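# Added note: the returned dict is keyed on (branch, app, #nodes) tuples (the
# result rows arrive ordered app, branch, nodes), and each value is the list of
# per-run dicts built from run_stat_keys above.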
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h, ndays):
self.filename = filename
self.ndays = ndays
self.legends = {}
        w = 1200 if w is None else w
        h = 1200 if h is None else h
self.fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = self.fig.add_subplot(111)
self.ax.set_title(title)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.tick_params(axis='y', labelleft=True, labelright=True)
plt.ylabel(ylabel, fontsize=8)
plt.xlabel(xlabel, fontsize=8)
self.ax.set_aspect(1)
self.ax.set_yscale('log')
self.ax.set_xscale('log')
self.cm = plt.get_cmap("PiYG")
def plot(self, x, y, color, marker_shape, legend):
self.ax.scatter(x, y, label=str(legend), c=color,
marker=marker_shape) #, markerfacecolor=color)
#float(x)/float(y), cmap=self.cm, # color=color,
def close(self):
ymin, ymax = plt.ylim()
plt.ylim((ymin-(ymax-ymin)*0.2, ymax+(ymax-ymin)*0.2))
xmin, xmax = plt.xlim()
plt.xlim((xmin-(xmax-xmin)*0.2, xmax+(xmax-xmin)*0.2))
plt.legend(prop={'size': 10}, loc=2)
plt.savefig(self.filename, format="png", transparent=False)
plt.close('all')
def plot(title, xbranch, ybranch, filename, width, height, data, root_path):
global mc
xlabel = "%s Thpt tx/sec" % xbranch
ylabel = "%s Thpt tx/sec" % ybranch
pl = Plot(title, xlabel, ylabel, filename, width, height, 1)
seq = []
if len(data) > 0:
for k,v in data.iteritems():
if v["y"]['tps'] == 0.0:
continue
diff = (float(v["y"]['tps'])-float(v["x"]['tps']))/float(v["y"]['tps']) * 100.
acolor = ['g','r'][diff<0]
pl.plot(v["x"]['tps'], v["y"]['tps'], acolor, MARKERS[0], "")
test_case = "%s %d %s" % (k[2],k[3],["node","nodes"][k[3]>1])
seq.append([test_case, round(diff,2), round(v["y"]['tps'],2), v["y"]['count'], round(v["x"]['tps'], 2), v["x"]["count"], acolor])
if abs(diff) > 5.:
atxt = "%s (%.2f%%)" % (test_case, diff)
pl.ax.annotate(atxt, xy=(v["x"]['tps'], v["y"]['tps']), xycoords='data',
xytext=(10*[1,-1][diff>0], -5), textcoords='offset points', ha=["left","right"][diff>0],
size=10, color=acolor) #, arrowprops=dict(arrowstyle="->"))
#if len(info) > 1:
# _at = AnchoredText("\n".join(info), loc=2, prop=dict(size=10))
# pl.ax.add_artist(_at)
pl.close()
seq = sorted(seq, key=lambda t: t[1])
seq.insert(0, ['test #nodes','% diff', ybranch, '#samples', xbranch, '#samples', 'flag'])
with open(filename.replace('png','html'), "w+") as f:
f.write(get_html_tbl(reduce(lambda x,y: x+y, seq, []), 7))
def get_html_tbl(seq, col_count):
if len(seq) % col_count:
seq.extend([''] * (col_count - len(seq) % col_count))
tbl_template = '<table>%s</table>' % ('<tr>%s</tr>' % ('<td>%s</td>' * col_count) * (len(seq)/col_count))
return tbl_template % tuple(seq)
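# Added note: the helper above pads the flat sequence to a multiple of
# col_count and lays it out row-major, e.g.
#   get_html_tbl(['a', 'b', 'c'], 2)
#   -> '<table><tr><td>a</td><td>b</td></tr><tr><td>c</td><td></td></tr></table>'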
def generate_index_file(root, filenames):
row = """
<tr>
<td><a href="%s"><img src="%s" width="400" height="400"/></a></td>
    </tr>
"""
table = """
%s
"""
sep = """
</table>
<table frame="box">
<tr>
<th colspan="3"><a name="%s">%s</a></th>
</tr>
"""
full_content = """
<html>
<head>
<title>Performance Graphs</title>
</head>
<body>
<table frame="box">
%s
</table>
</body>
</html>
"""
hrow = """
<tr>
<td><a href=#%s>%s</a></td>
<td><a href=#%s>%s</a></td>
<td><a href=#%s>%s</a></td>
<td><a href=#%s>%s</a></td>
</tr>
"""
h = map(lambda x:(x[0].replace(' ','%20'), x[0]), filenames)
n = 4
z = n-len(h)%n
while z > 0 and z < n:
h.append(('',''))
z -= 1
rows = []
t = ()
for i in range(1, len(h)+1):
t += tuple(h[i-1])
if i%n == 0:
rows.append(hrow % t)
t = ()
last_app = None
for i in filenames:
if i[0] != last_app:
rows.append(sep % (i[0], i[0]))
last_app = i[0]
rows.append(row % (i[1], i[1]))
try:
with open(sys.argv[1]+"/"+str(i[1].replace('png','html')), 'r') as f:
rows.append(table % f.read())
except:
pass
return full_content % ''.join(rows)
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base [ndays]" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[1], "does not exist"
exit(-1)
prefix = sys.argv[2]
path = os.path.join(sys.argv[1], sys.argv[2])
ndays = 2000
if len(sys.argv) >=4:
ndays = int(sys.argv[3])
width = None
height = None
if len(sys.argv) >= 5:
width = int(sys.argv[4])
if len(sys.argv) >= 6:
height = int(sys.argv[5])
branches = get_branches(STATS_SERVER, 21212, ndays)
# show all the history
stats = get_stats(STATS_SERVER, 21212, ndays)
root_path = path
filenames = [] # (appname, latency, throughput)
iorder = 0
bc = [(branches[i],branches[j]) for i in range(len(branches)) for j in range(len(branches)) if i != j]
for bg in bc:
merged = {}
#missing = ['Cases missing from %s:' % bg[1]['branch']]
for group,data in stats.iteritems():
if group[1].startswith('Security'):
continue
if bg[0]['branch'] == group[0]:
k = (bg[1]['branch'],group[1],group[2])
if k in stats:
m = stats[k]
merged[(bg[0]['branch'],bg[1]['branch'])+(group[1],group[2])] = {"y": data[0], "x": stats[k][0]}
#else:
# missing.append("%s %d %s" % (group[1],group[2],["node","nodes"][group[2]>1]))
app = "%s vs %s" % (bg[0]['branch'], bg[1]['branch'])
title = "%s as of %s vs %s as of %s" % (bg[0]['branch'],bg[0]['sampledate'],bg[1]['branch'],bg[1]['sampledate'])
app_filename = app.replace(' ', '_')
"""
latency95_filename = '%s-latency95-%s.png' % (prefix, app_filename)
latency99_filename = '%s-latency99-%s.png' % (prefix, app_filename)
throughput_filename = '%s-throughput-%s.png' % (prefix, app_filename)
filenames.append((app, latency95_filename, latency99_filename, throughput_filename, iorder))
"""
throughput_filename = '%s-throughput-%s.png' % (prefix, app_filename)
filenames.append((app, throughput_filename, iorder))
"""
plot(app + " latency95", "Time", "Latency (ms)",
path + "-latency95-" + app_filename + ".png", width, height, app,
data, 'lat95')
plot(app + " latency99", "Time", "Latency (ms)",
path + "-latency99-" + app_filename + ".png", width, height, app,
data, 'lat99')
"""
plot(title+" throughput", bg[1]['branch'], bg[0]['branch'],
path + "-throughput-" + app_filename + ".png", width, height, merged, root_path)
# generate index file
index_file = open(root_path + '-index.html', 'w')
#sorted_filenames = sorted(filenames, key=lambda f: f[0].lower()+str(f[1]))
index_file.write(generate_index_file(root_path, filenames))
index_file.close()
if __name__ == "__main__":
main()
| agpl-3.0 |
hitszxp/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 17 | 2021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
This example consists of fitting a Gaussian Process model to the diabetes
dataset.
The correlation parameters are determined by means of maximum likelihood
estimation (MLE). An anisotropic squared exponential correlation model and a
constant regression model are assumed. We also used a nugget = 1e-2 in order to
account for the (strong) noise in the targets.
We then compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
Takonan/csc411_a3 | basicBoosting.py | 1 | 11498 | from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import LabelKFold
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.metrics.scorer import check_scoring
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from utils import *
import time
import matplotlib.pyplot as plt
def run_AdaBoost(num_estimator=10, num_iter=5, include_mirror=False, do_cv=False):
# Loading data
# train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
inputs, targets, identities = load_data_with_identity(include_mirror)
lkf = LabelKFold(identities, n_folds=10)
# myClassifier = LogisticRegression()
# myClassifier = Perceptron(n_iter=num_iter)
# myClassifier = SGDClassifier(loss='perceptron',n_iter=num_iter)
clf = AdaBoostClassifier(n_estimators=num_estimator)
if do_cv:
# Do cross validation
# scores = cross_val_score(clf, train_inputs, train_targets)
scores = cross_val_score(clf, inputs, targets, cv=lkf)
print scores
print scores.mean()
return scores.mean()
else:
# Do just one validation
clf.fit(train_inputs, train_targets)
pred = clf.predict(valid_inputs)
score = (pred == valid_targets).mean()
return score
# clf = AdaBoostClassifier(n_estimators=100)
# scores = cross_val_score(clf, train_inputs, train_targets, n_jobs=-1)
# print scores.mean()
def run_ExtremeRandFor(include_mirror=False):
train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
clf = ExtraTreesClassifier(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0, n_jobs=-1)
scores = cross_val_score(clf, train_inputs, train_targets, n_jobs=-1)
print scores.mean()
def run_RandFor(include_mirror=False):
# train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
inputs, targets, identities = load_data_with_identity(False)
# inputs, targets, identities = reload_data_with_identity_normalized()
lkf = LabelKFold(identities, n_folds=10)
clf = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=1, random_state=0, n_jobs=-1)
scores = cross_val_score(clf, inputs, targets, n_jobs=-1, cv=lkf)
print scores
print scores.mean()
def run_Bagging(num_estimator=10, num_iter=5, include_mirror=False, do_cv=False, reload=False):
if not reload:
train_inputs, train_targets, valid_inputs, valid_targets = load_data(include_mirror)
else:
train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets = reload_data_with_test_normalized()
# myClassifier = LinearSVC()
# myClassifier = RidgeClassifier()
myClassifier = Perceptron(n_iter=num_iter)
# myClassifier = SGDClassifier(loss='perceptron',n_iter=num_iter)
# myClassifier = OneVsRestClassifier(LinearSVC(random_state=0))
# clf = BaggingClassifier(base_estimator=myClassifier, n_estimators=num_estimator, n_jobs=-1)
if do_cv:
# Do cross validation
scores = cross_val_score(clf, train_inputs, train_targets)
return scores.mean()
else:
# Do just one validation
# clf.fit(train_inputs, train_targets)
pred = myClassifier.fit(train_inputs, train_targets).predict(valid_inputs)
# pred = clf.predict(valid_inputs)
score = (pred == (valid_targets)).mean()
return score
return
def run_Bagging_LabelKFold(num_estimator=10, num_iter=5, include_mirror=False, reload=False, classifier='Perceptron'):
ZCAMatrix = np.load('ZCAMatrix.npy')
if not reload:
inputs, targets, identities = load_data_with_identity(True)
inputs = inputs.reshape(inputs.shape[0], 1, 32,32) # For CNN model
inputs = preprocess_images(inputs)
inputs = inputs.reshape(inputs.shape[0],inputs.shape[1]*inputs.shape[2]*inputs.shape[3])
inputs = np.dot(inputs,ZCAMatrix)
else:
inputs, targets, identities = reload_data_with_identity_normalized()
if classifier == 'Perceptron':
myClassifier = Perceptron(n_iter=num_iter)
elif classifier == 'DecisionTree':
myClassifier = DecisionTreeClassifier()
elif classifier == 'LinearSVC':
myClassifier = LinearSVC()
elif classifier == 'RidgeClassifier':
myClassifier = RidgeClassifier()
else:
print "Classifier not recognized. Aborting..."
return
# myClassifier = SGDClassifier(loss='perceptron',n_iter=num_iter)
# myClassifier = OneVsRestClassifier(LinearSVC(random_state=0))
clf = BaggingClassifier(base_estimator=myClassifier, n_estimators=num_estimator)
lkf = LabelKFold(identities, n_folds=10)
print "Starting cross validation testing on %s bagging with %d estimators" % (classifier, num_estimator)
scores = cross_val_score(clf, inputs, targets, cv=lkf)
print scores
print scores.mean()
return scores
def run_Bagging_testset(num_estimator=100, num_iter=25, include_mirror=True):
inputs, targets, identities = load_data_with_identity(include_mirror)
x_test = load_public_test()
myClassifier = Perceptron(n_iter=num_iter)
clf = BaggingClassifier(base_estimator=myClassifier, n_estimators=num_estimator, n_jobs=-1)
clf.fit(inputs, targets)
# Predict on the training data
train_pred = clf.predict(inputs)
print classification_report(targets, train_pred)
print "Done learning, now predicting"
pred = clf.predict(x_test)
print pred
print "Saving the output test prediction"
save_output_csv("Perceptron_Bagging_test_predictions.csv", pred)
return
def run_Bagging_NumEstimator_Experiment(classifier='Perceptron'):
val_avg_score_list = np.zeros(9)
val_max_score_list = np.zeros(9)
val_scores_list = []
num_estimator_list = np.array([1,2,3, 5, 10, 25, 50, 75, 100])
for i in xrange(num_estimator_list.shape[0]):
num_estimator = num_estimator_list[i]
print "Number of num_estimator: ", num_estimator
score = run_Bagging_LabelKFold(num_estimator=num_estimator, num_iter=10, include_mirror=True, classifier=classifier)
print "Average Validation score: ", score
val_avg_score_list[i] = score.mean()
val_max_score_list[i] = score.max()
val_scores_list.append(score)
print "Val_avg_score_list: "
print val_avg_score_list
print "Val_max_score_list: "
print val_max_score_list
print "All scores:"
print val_scores_list
print "num_estimator_list: "
print num_estimator_list
# Plot the data
plt.figure()
plt.plot(num_estimator_list, val_avg_score_list, label='Avg Validation Accuracy (10 fold)')
plt.plot(num_estimator_list, val_max_score_list, label='Max Validation Accuracy (10 fold)')
plt.legend(loc=4)
    plt.title('%s Bagging Validation Accuracy vs Number of Estimators' % classifier)
plt.xlabel('Number of Estimators')
plt.ylabel('Accuracy')
plt.savefig('%s_Bagging_ValAcc_vs_NumEstimator.png' % classifier)
plt.show()
return
def pca_SVM(normalized_intensity=False, ratio=0.25):
if not normalized_intensity:
# Perform PCA on the unlabeled data (Not include the mirror)
images = load_unlabeled_data()
start = time.time()
pca = PCA(n_components=images.shape[1]*ratio)
unlabeled_pca = pca.fit_transform(images)
elasped = time.time() - start
print "Done doing PCA fit with ratio %f" % (ratio)
print "It took %f seconds" % elasped
# Now do Kernel PCA on the unlabeled_pca
# kpca = KernelPCA(kernel="rbf", gamma=15)
# start = time.time()
# unlabeled_kpca = kpca.fit(unlabeled_pca)
# unlabeled_kpca = kpca.fit(images[0:6000])
# elasped = time.time() - start
# print "Done Kernel PCA fit"
# print "It took %f seconds" % elasped
# # Perform SVM on the PCA transformed data
# train_inputs, train_targets, valid_inputs, valid_targets, test_inputs, test_targets = load_data_with_test(True)
# train_inputs = pca.transform(train_inputs)
# valid_inputs = pca.transform(valid_inputs)
# test_inputs = pca.transform(test_inputs)
# Train one vs one SVM's
clf = SVC()
# clf.fit(train_inputs, train_targets)
# val_pred = clf.predict(valid_inputs)
# print valid_targets
# print val_pred
# print accuracy_score(valid_targets, val_pred)
# print(classification_report(valid_targets, val_pred))
# test_pred = clf.predict(test_inputs)
# print test_targets
# print test_pred
# print accuracy_score(test_targets, test_pred)
# print(classification_report(test_targets, test_pred))
inputs, targets, identities = load_data_with_identity(True)
# inputs = kpca.transform(inputs)
inputs = pca.transform(inputs)
print "Dimension of inputs:", inputs.shape
lkf = LabelKFold(identities, n_folds=3)
# for train_index, test_index in lkf:
# print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = inputs[train_index], inputs[test_index]
# y_train, y_test = targets[train_index], targets[test_index]
    # Now do legit cross validation:
scores = cross_val_score(clf, inputs, targets, cv=lkf, n_jobs=-1)
print scores.mean()
return
if __name__ == '__main__':
#print "Running classification algorithms with original training data set:"
#start = time.time()
#run_AdaBoost(num_estimator=500, include_mirror=True, do_cv=True)
#elasped = time.time() - start
#print "Elasped time: ", elasped
# # run_ExtremeRandFor()
# run_RandFor()
# start = time.time()
# run_Bagging()
# elasped = time.time() - start
# print "Elasped time: ", elasped
# print "Running classification algorithms with original training data set and mirrorred data set:"
# # run_AdaBoost(True)
# # run_ExtremeRandFor(True)
# # run_RandFor(True)
# start = time.time()
# run_Bagging(True)
# elasped = time.time() - start
# print "Elasped time: ", elasped
#for num_estimator in [100]: #[10, 25, 50]:
# for num_iter in [25]: #[5, 10, 25, 50]:
# # print "Original Set, num_estimator: %d, num_iter: %d, accuracy: %f" % (num_estimator, num_iter, run_Bagging_LabelKFold(num_estimator, num_iter, False, False))
# print "Original + Mirrored Set, num_estimator: %d, num_iter: %d, accuracy: %f" % (num_estimator, num_iter, run_Bagging_LabelKFold(num_estimator, num_iter, True, False))
# pca_SVM()
run_Bagging_NumEstimator_Experiment(classifier='Perceptron')
| bsd-3-clause |
linebp/pandas | pandas/compat/pickle_compat.py | 11 | 5476 | """
Support pre-0.12 series pickle compatibility.
"""
import sys
import pandas # noqa
import copy
import pickle as pkl
from pandas import compat, Index
from pandas.compat import u, string_types # noqa
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
try:
stack[-1] = func(*args)
return
except Exception as e:
# If we have a deprecated function,
# try to replace and try again.
msg = '_reconstruct: First argument must be a sub-type of ndarray'
if msg in str(e):
try:
cls = args[0]
stack[-1] = object.__new__(cls)
return
except:
pass
# try to re-encode the arguments
if getattr(self, 'encoding', None) is not None:
args = tuple([arg.encode(self.encoding)
if isinstance(arg, string_types)
else arg for arg in args])
try:
stack[-1] = func(*args)
return
except:
pass
# unknown exception, re-raise
if getattr(self, 'is_verbose', None):
print(sys.exc_info())
print(func, args)
raise
# If classes are moved, provide compat here.
_class_locations_map = {
# 15477
('pandas.core.base', 'FrozenNDArray'):
('pandas.core.indexes.frozen', 'FrozenNDArray'),
('pandas.core.base', 'FrozenList'):
('pandas.core.indexes.frozen', 'FrozenList'),
# 10890
('pandas.core.series', 'TimeSeries'):
('pandas.core.series', 'Series'),
('pandas.sparse.series', 'SparseTimeSeries'):
('pandas.core.sparse.series', 'SparseSeries'),
# 12588, extensions moving
('pandas._sparse', 'BlockIndex'):
('pandas._libs.sparse', 'BlockIndex'),
('pandas.tslib', 'Timestamp'):
('pandas._libs.tslib', 'Timestamp'),
('pandas.tslib', '__nat_unpickle'):
('pandas._libs.tslib', '__nat_unpickle'),
('pandas._period', 'Period'): ('pandas._libs.period', 'Period'),
# 15998 top-level dirs moving
('pandas.sparse.array', 'SparseArray'):
('pandas.core.sparse.array', 'SparseArray'),
('pandas.sparse.series', 'SparseSeries'):
('pandas.core.sparse.series', 'SparseSeries'),
('pandas.sparse.frame', 'SparseDataFrame'):
('pandas.core.sparse.frame', 'SparseDataFrame'),
('pandas.indexes.base', '_new_Index'):
('pandas.core.indexes.base', '_new_Index'),
('pandas.indexes.base', 'Index'):
('pandas.core.indexes.base', 'Index'),
('pandas.indexes.numeric', 'Int64Index'):
('pandas.core.indexes.numeric', 'Int64Index'),
('pandas.indexes.range', 'RangeIndex'):
('pandas.core.indexes.range', 'RangeIndex'),
('pandas.indexes.multi', 'MultiIndex'):
('pandas.core.indexes.multi', 'MultiIndex'),
('pandas.tseries.index', '_new_DatetimeIndex'):
('pandas.core.indexes.datetimes', '_new_DatetimeIndex'),
('pandas.tseries.index', 'DatetimeIndex'):
('pandas.core.indexes.datetimes', 'DatetimeIndex'),
('pandas.tseries.period', 'PeriodIndex'):
('pandas.core.indexes.period', 'PeriodIndex')
}
# our Unpickler sub-class to override methods and some dispatcher
# functions for compat
if compat.PY3:
class Unpickler(pkl._Unpickler):
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
return super(Unpickler, self).find_class(module, name)
else:
class Unpickler(pkl.Unpickler):
def find_class(self, module, name):
# override superclass
key = (module, name)
module, name = _class_locations_map.get(key, key)
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
Unpickler.dispatch = copy.copy(Unpickler.dispatch)
Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
# compat
if issubclass(cls, Index):
obj = object.__new__(cls)
else:
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
try:
Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
except:
pass
def load(fh, encoding=None, compat=False, is_verbose=False):
"""load a pickle, with a provided encoding
if compat is True:
fake the old class hierarchy
if it works, then return the new type objects
Parameters
----------
fh: a filelike object
encoding: an optional encoding
compat: provide Series compatibility mode, boolean, default False
is_verbose: show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except:
raise
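# Illustrative usage sketch (added note, not part of pandas itself; the file
# name is hypothetical):
#
#     with open('legacy_frame.pkl', 'rb') as fh:
#         df = load(fh, encoding='latin-1')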
| bsd-3-clause |
henrymliu/Dynamical_Billiards | AbstractTable.py | 1 | 6431 | """
AbstractTable module for Dynamical Billiards Simulator
All the different tables will be a subclass of this abstract superclass
"""
import numpy as np
from matplotlib import animation
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from PIL import Image
class Ball(object):
"""Holds the colour and state of a ball in the simulation"""
def __init__(self, **kwargs):
super().__init__()
self.parameters = kwargs
self.state = self.parameters['initstate']
self.color = self.parameters['color']
class AbstractTable(object):
"""
Abstract class for a table that simulates collisions
this superclass takes care of the animating and preview generation
subclasses will take care of detecting collisions and drawing the table
subclasses must implement:
drawTable
step
all others are optional
"""
def __init__(self, **kwargs):
super().__init__()
self.parameters = kwargs
self.ballList = []
self.nBalls = self.parameters['nBalls']
self.drag = 0.999 # TODO: possibly change this with entrybox
# use colormap for many colors
self.cmap = plt.cm.get_cmap("gist_rainbow", self.nBalls + 1)
def drawTable(self, ec='none'):
"""
Each table must implement this function
should make a figure and axes in self and should draw the table as a
collection of matplotlib patches
edge colour is for the patches, when animating it can be left as none
but must be 'k' for generatePreview
"""
return None
def step(self, particle, dt):
"""
each table must implement this function
for each check particle, check if boundaries crossed and update
velocity (position is updated in stepall)
"""
return None
def stepall(self, dt):
"""
updates position of each ball and checks boundaries using step
"""
for particle in self.ballList:
if self.parameters['friction']:
particle.state[2] *= self.drag
particle.state[3] *= self.drag
particle.state[0] += dt * particle.state[2]
particle.state[1] += dt * particle.state[3]
self.step(particle, dt)
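    # Added note: with the dt of 1/30 used in main() and drag = 0.999, enabling
    # friction removes roughly 3% of each velocity component per second of
    # simulated time (0.999**30 is about 0.970).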
def generatePreview(self):
"""
saves a preview of the figure as preview.png and returns a PIL image
object of the preview
must run update before using this method
"""
# draw table with black edge color
self.drawTable('k')
balls=[]
# initialize all the balls and their positions
for i in range(self.nBalls):
balls.append(Ball(color=self.cmap(i),
initstate=self.parameters['balls'][i]))
self.ax.plot(balls[i].state[0], balls[i].state[1],
color=self.cmap(i), marker = 'o', ms=8)
# plot arrow indicating velocity vector
self.ax.add_patch(patches.Arrow(balls[i].state[0], balls[i].state[1], balls[i].state[2]*0.3,
balls[i].state[3]*0.3, width=0.05, ls='-', color=self.cmap(i)))
# linewidth needs to be larger than animating so it will be visible in
# the preview
self.table.set_linewidth(6)
self.fig.savefig('preview.png')
f=Image.open('preview.png')
# resize object so it will fit in tkinter canvas
f=f.resize((300,300))
return f
def update(self, **kwargs):
"""saves new parameters for the Simulation"""
self.parameters = kwargs
def main(self,frames=600):
"""
opens the matplotlib window and starts the animation
should run update before calling with function
"""
# close any figures made from generatePreview
plt.close('all')
# make figure and axis and add the table to it
self.drawTable()
# define time step. this value seems to work well but can be adjusted
dt = 1 / 30
# initialize balls and axes objects
particles = []
paths = []
self.pathx = {}
self.pathy = {}
for i in range(self.nBalls):
# make ball object and add it to ball list
self.ballList.append(Ball(color=self.cmap(i), initstate=self.parameters['balls'][i]))
# initialize particles and paths that will be plotted
particles.append(self.ax.plot([], [], color=self.cmap(i), marker='o', ms=6)[0])
paths.append(self.ax.plot([], [], color=self.cmap(i), ls='-', lw=1)[0])
self.pathx[i] = np.array([])
self.pathy[i] = np.array([])
def init():
"""
initialize function for the animation.
gets run before each frame.
"""
# reset particles
            for ball in particles:
                ball.set_data([], [])
# reset table
self.table.set_edgecolor('none')
return tuple(particles) + (self.table,) + tuple(paths)
def animate(k):
"""perform animation step"""
# trace the particle if check box is selected
if self.parameters['trace']:
for i in range(self.nBalls):
self.pathx[i] = np.append(self.pathx[i],
self.ballList[i].state[0])
self.pathy[i] = np.append(self.pathy[i],
self.ballList[i].state[1])
# update position and check for collisions
self.stepall(dt)
# update table
self.table.set_edgecolor('k')
# set particle position and path data
for ball in range(self.nBalls):
particles[ball].set_data(self.ballList[ball].state[0],
self.ballList[ball].state[1])
paths[ball].set_data(self.pathx[ball], self.pathy[ball])
return tuple(particles) + (self.table,) + tuple(paths)
# define animation with appropriate playbackSpeed
ani = animation.FuncAnimation(self.fig, animate, frames=frames,
interval=np.ceil((1 / self.parameters['playbackSpeed']) * 10 ** 3),
blit=True, init_func=init)
# show matplotlib window
plt.show()
return ani
| mit |
mshakya/PyPiReT | piret/counts/featurecounts.py | 1 | 5439 |
#! /usr/bin/env python
"""Check design."""
from __future__ import print_function
import os
from os.path import basename, splitext
from plumbum.cmd import stringtie, featureCounts
# from piret.runs import Map
import pandas as pd
from luigi.util import inherits, requires
from luigi.contrib.external_program import ExternalProgramTask
from luigi import LocalTarget, Parameter, IntParameter
import luigi
import logging
class FeatureCounts(luigi.Task):
"""Summarize mapped reads classificaion using FeatureCount."""
fastq_dic = luigi.DictParameter()
kingdom = luigi.Parameter()
gff_file = luigi.Parameter()
workdir = luigi.Parameter()
indexfile = luigi.Parameter()
num_cpus = luigi.IntParameter()
ref_file = luigi.Parameter()
fid = luigi.Parameter()
stranded = luigi.IntParameter()
def output(self):
"""Expected output of featureCounts."""
counts_dir = os.path.join(self.workdir, "processes", "featureCounts",
self.kingdom)
gff_fp = os.path.abspath(self.gff_file)
features = list(set(pd.read_csv(gff_fp, sep="\t", header=None,
comment='#')[2].tolist()))
features = [feat for feat in features if feat in ['CDS', 'rRNA',
'tRNA', 'exon',
'gene',
'transcript']]
loc_target = LocalTarget(os.path.join(counts_dir, features[-1] +
"_count.tsv"))
return loc_target
def run(self):
"""Running featureCounts on all."""
map_dir = os.path.join(self.workdir, "processes", "mapping")
samp_list = list(self.fastq_dic.keys())
in_srtbam_list = [os.path.join(map_dir, samp, samp + "_srt.bam")
for samp in samp_list]
counts_dir = os.path.join(self.workdir, "processes", "featureCounts",
self.kingdom)
if not os.path.exists(counts_dir):
os.makedirs(counts_dir)
if ',' in self.gff_file:
gff_list = self.gff_file.split(",")
gff_full_path = [os.path.abspath(gff) for gff in gff_list]
for gffs in gff_full_path:
feature = list(set(pd.read_csv(gffs,
sep="\t", header=None,
comment='#')[2].tolist()))
for feat in feature:
if feat in ['CDS', 'rRNA', 'tRNA', 'exon',
'NovelRegion', 'transcript', 'mRNA']:
fcount_cmd_opt = ["-a", self.gff_file,
"-s", self.stranded,
"-B",
"-p", "-P", "-C",
"-g", self.fid,
"-t", feat,
"-T", self.num_cpus,
"-o", counts_dir + "/" + feat +
"_count.tsv"] + in_srtbam_list
elif feat in ['gene']:
fcount_cmd_opt = ["-a", self.gff_file,
"-s", self.stranded,
"-B",
"-p", "-P", "-C",
"-g", "ID",
"-t", feat,
"-T", self.num_cpus,
"-o", counts_dir + "/" + feat +
"_count.tsv"] + in_srtbam_list
fcount_cmd = featureCounts[fcount_cmd_opt]
fcount_cmd()
else:
feature = list(set(pd.read_csv(self.gff_file, sep="\t", header=None,
comment='#')[2].tolist()))
for feat in feature:
if feat in ['CDS', 'rRNA', 'tRNA', 'exon', 'transcript',
'NovelRegion']:
fcount_cmd_opt = ["-a", self.gff_file,
"-s", self.stranded,
"-B",
"-p", "-P", "-C",
"-g", self.fid,
"-t", feat,
"-T", self.num_cpus,
"-o", counts_dir + "/" + feat +
"_count.tsv"] + in_srtbam_list
fcount_cmd = featureCounts[fcount_cmd_opt]
fcount_cmd()
if feat in ['gene']:
fcount_cmd_opt = ["-a", self.gff_file,
"-s", self.stranded,
"-B",
"-p", "-P", "-C",
"-g", "ID",
"-t", feat,
"-T", self.num_cpus,
"-o", counts_dir + "/" + feat +
"_count.tsv"] + in_srtbam_list
fcount_cmd = featureCounts[fcount_cmd_opt]
fcount_cmd()
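    # Added note: for the 'gene' feature the plumbum call above is equivalent to
    # a command line of the form (paths and thread count are illustrative only):
    #   featureCounts -a ref.gff -s 1 -B -p -P -C -g ID -t gene -T 4 \
    #       -o <workdir>/processes/featureCounts/<kingdom>/gene_count.tsv \
    #       sample1_srt.bam sample2_srt.bam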
| bsd-3-clause |
kjung/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
djgagne/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of steps in ``C_range`` and
``gamma_range`` will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
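# Added note: the normalizer above maps [vmin, midpoint, vmax] piecewise
# linearly onto [0, 0.5, 1]. With the values used below (vmin=0.2,
# midpoint=0.92), a score of 0.56 -- halfway between vmin and the midpoint --
# lands at 0.25 of the colormap, so all scores below ~0.92 are compressed into
# the lower half of the colors.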
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
pystruct/pystruct | pystruct/learners/subgradient_ssvm.py | 5 | 12101 | from time import time
import numpy as np
from sklearn.externals.joblib import Parallel, delayed, cpu_count
from sklearn.utils import gen_even_slices, shuffle
from .ssvm import BaseSSVM
from ..utils import find_constraint
class SubgradientSSVM(BaseSSVM):
"""Structured SVM solver using subgradient descent.
Implements a margin rescaled with l1 slack penalty.
By default, a constant learning rate is used.
It is also possible to use the adaptive learning rate found by AdaGrad.
This class implements online subgradient descent. If n_jobs != 1,
small batches of size n_jobs are used to exploit parallel inference.
If inference is fast, use n_jobs=1.
Parameters
----------
model : StructuredModel
Object containing model structure. Has to implement
`loss`, `inference` and `loss_augmented_inference`.
max_iter : int, default=100
Maximum number of passes over dataset to find constraints and perform
updates.
C : float, default=1.
Regularization parameter.
verbose : int, default=0
Verbosity.
learning_rate : float or 'auto', default='auto'
Learning rate used in subgradient descent. If 'auto', the pegasos
schedule is used, which starts with ``learning_rate = n_samples * C``.
momentum : float, default=0.0
Momentum used in subgradient descent.
n_jobs : int, default=1
Number of parallel jobs for inference. -1 means as many as cpus.
batch_size : int, default=None
Ignored if n_jobs > 1. If n_jobs=1, inference will be done in mini
batches of size batch_size. If n_jobs=-1, batch learning will be
performed, that is the whole dataset will be used to compute each
subgradient.
show_loss_every : int, default=0
        Controls how often the Hamming loss is computed (for monitoring
        purposes). Zero means never; otherwise it will be computed every
        show_loss_every'th epoch.
decay_exponent : float, default=1
Exponent for decaying learning rate. Effective learning rate is
``learning_rate / (decay_t0 + t)** decay_exponent``. Zero means no
decay.
decay_t0 : float, default=10
Offset for decaying learning rate. Effective learning rate is
``learning_rate / (decay_t0 + t)** decay_exponent``.
break_on_no_constraints : bool, default=True
Break when there are no new constraints found.
logger : logger object.
averaging : string, default=None
Whether and how to average weights. Possible options are 'linear',
'squared' and None.
The string reflects the weighting of the averaging:
- ``linear: w_avg ~ w_1 + 2 * w_2 + ... + t * w_t``
- ``squared: w_avg ~ w_1 + 4 * w_2 + ... + t**2 * w_t``
Uniform averaging is not implemented as it is worse than linear
weighted averaging or no averaging.
shuffle : bool, default=False
Whether to shuffle the dataset in each iteration.
Attributes
----------
w : nd-array, shape=(model.size_joint_feature,)
The learned weights of the SVM.
``loss_curve_`` : list of float
List of loss values if show_loss_every > 0.
``objective_curve_`` : list of float
Primal objective after each pass through the dataset.
``timestamps_`` : list of int
Total training time stored before each iteration.
References
----------
* Nathan Ratliff, J. Andrew Bagnell and Martin Zinkevich:
(Online) Subgradient Methods for Structured Prediction, AISTATS 2007
* Shalev-Shwartz, Shai and Singer, Yoram and Srebro, Nathan and Cotter,
Andrew: Pegasos: Primal estimated sub-gradient solver for svm,
Mathematical Programming 2011
"""
def __init__(self, model, max_iter=100, C=1.0, verbose=0, momentum=0.0,
learning_rate='auto', n_jobs=1,
show_loss_every=0, decay_exponent=1,
break_on_no_constraints=True, logger=None, batch_size=None,
decay_t0=10, averaging=None, shuffle=False):
BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,
n_jobs=n_jobs, show_loss_every=show_loss_every,
logger=logger)
self.averaging = averaging
self.break_on_no_constraints = break_on_no_constraints
self.momentum = momentum
self.learning_rate = learning_rate
self.t = 0
self.decay_exponent = decay_exponent
self.decay_t0 = decay_t0
self.batch_size = batch_size
self.shuffle = shuffle
def _solve_subgradient(self, djoint_feature, n_samples, w):
"""Do a single subgradient step."""
grad = (djoint_feature - w / (self.C * n_samples))
self.grad_old = ((1 - self.momentum) * grad
+ self.momentum * self.grad_old)
if self.decay_exponent == 0:
effective_lr = self.learning_rate_
else:
effective_lr = (self.learning_rate_
/ (self.t + self.decay_t0)
** self.decay_exponent)
w += effective_lr * self.grad_old
if self.averaging == 'linear':
rho = 2. / (self.t + 2.)
self.w = (1. - rho) * self.w + rho * w
elif self.averaging == 'squared':
rho = 6. * (self.t + 1) / ((self.t + 2) * (2 * self.t + 3))
self.w = (1. - rho) * self.w + rho * w
else:
self.w = w
self.t += 1.
return w
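    # Added note: with the defaults (learning_rate='auto', decay_exponent=1,
    # decay_t0=10) the first update uses an effective step of
    # C * n_samples / 10, and the step size then decays roughly like 1 / t.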
def fit(self, X, Y, constraints=None, warm_start=False, initialize=True):
"""Learn parameters using subgradient descent.
Parameters
----------
X : iterable
            Training instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
            Training labels. Contains the structured labels for inputs in X.
Needs to have the same length as X.
constraints : None
Discarded. Only for API compatibility currently.
warm_start : boolean, default=False
Whether to restart a previous fit.
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this true except if you really know what you are doing.
"""
if initialize:
self.model.initialize(X, Y)
if self.verbose:
print("Training primal subgradient structural SVM")
self.grad_old = np.zeros(self.model.size_joint_feature)
self.w = getattr(self, "w", np.zeros(self.model.size_joint_feature))
w = self.w.copy()
if not warm_start:
self.objective_curve_ = []
self.timestamps_ = [time()]
if self.learning_rate == "auto":
self.learning_rate_ = self.C * len(X)
else:
self.learning_rate_ = self.learning_rate
else:
self.timestamps_ = (np.array(self.timestamps_) - time()).tolist()
try:
# catch ctrl+c to stop training
for iteration in range(self.max_iter):
if self.shuffle:
X, Y = shuffle(X, Y)
if self.n_jobs == 1:
objective, positive_slacks, w = self._sequential_learning(X, Y, w)
else:
objective, positive_slacks, w = self._parallel_learning(X, Y, w)
# some statistics
objective = objective * self.C + np.sum(w ** 2) / 2.
if positive_slacks == 0:
if self.verbose:
print("No additional constraints")
if self.break_on_no_constraints:
break
if self.verbose > 0:
print(self)
print("iteration %d" % iteration)
                    print("positive slacks: %d, "
"objective: %f" %
(positive_slacks, objective))
self.timestamps_.append(time() - self.timestamps_[0])
self.objective_curve_.append(self._objective(X, Y))
if self.verbose > 2:
print(self.w)
self._compute_training_loss(X, Y, iteration)
if self.logger is not None:
self.logger(self, iteration)
except KeyboardInterrupt:
pass
if self.verbose:
print("Computing final objective")
self.timestamps_.append(time() - self.timestamps_[0])
self.objective_curve_.append(self._objective(X, Y))
if self.logger is not None:
self.logger(self, 'final')
if self.verbose:
if self.objective_curve_:
print("final objective: %f" % self.objective_curve_[-1])
if self.verbose and self.n_jobs == 1:
print("calls to inference: %d" % self.model.inference_calls)
return self
def _parallel_learning(self, X, Y, w):
n_samples = len(X)
objective, positive_slacks = 0, 0
verbose = max(0, self.verbose - 3)
if self.batch_size is not None:
            raise ValueError("If n_jobs != 1, batch_size needs to "
"be None")
# generate batches of size n_jobs
# to speed up inference
if self.n_jobs == -1:
n_jobs = cpu_count()
else:
n_jobs = self.n_jobs
n_batches = int(np.ceil(float(len(X)) / n_jobs))
slices = gen_even_slices(n_samples, n_batches)
for batch in slices:
X_b = X[batch]
Y_b = Y[batch]
candidate_constraints = Parallel(
n_jobs=self.n_jobs,
verbose=verbose)(delayed(find_constraint)(
self.model, x, y, w)
for x, y in zip(X_b, Y_b))
djoint_feature = np.zeros(self.model.size_joint_feature)
for x, y, constraint in zip(X_b, Y_b,
candidate_constraints):
y_hat, delta_joint_feature, slack, loss = constraint
if slack > 0:
objective += slack
djoint_feature += delta_joint_feature
positive_slacks += 1
w = self._solve_subgradient(djoint_feature, n_samples, w)
return objective, positive_slacks, w
def _sequential_learning(self, X, Y, w):
n_samples = len(X)
objective, positive_slacks = 0, 0
if self.batch_size in [None, 1]:
# online learning
for x, y in zip(X, Y):
y_hat, delta_joint_feature, slack, loss = \
find_constraint(self.model, x, y, w)
objective += slack
if slack > 0:
positive_slacks += 1
self._solve_subgradient(delta_joint_feature, n_samples, w)
else:
# mini batch learning
if self.batch_size == -1:
slices = [slice(0, len(X))]
else:
n_batches = int(np.ceil(float(len(X)) / self.batch_size))
slices = gen_even_slices(n_samples, n_batches)
for batch in slices:
X_b = X[batch]
Y_b = Y[batch]
Y_hat = self.model.batch_loss_augmented_inference(
X_b, Y_b, w, relaxed=True)
delta_joint_feature = (self.model.batch_joint_feature(X_b, Y_b)
- self.model.batch_joint_feature(X_b, Y_hat))
loss = np.sum(self.model.batch_loss(Y_b, Y_hat))
violation = np.maximum(0, loss - np.dot(w, delta_joint_feature))
objective += violation
positive_slacks += self.batch_size
self._solve_subgradient(delta_joint_feature / len(X_b), n_samples, w)
return objective, positive_slacks, w
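# Hedged illustration (editor-added sketch, not part of the original API): the
# class docstring above defines the effective learning rate as
# ``learning_rate / (decay_t0 + t) ** decay_exponent``; this helper simply
# evaluates that formula so the decay schedule can be inspected in isolation.
def _example_effective_learning_rate(learning_rate=1.0, decay_t0=10,
                                     decay_exponent=1, t=100):
    return learning_rate / (decay_t0 + t) ** decay_exponent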
| bsd-2-clause |
jaredweiss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatibility with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
    Make a 2D grid from two 1D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
    trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
          Must be even; a power of 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
          matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
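# Hedged usage sketch (editor-added, illustrative only; the helper name is an
# assumption, not part of mlab): estimate the PSD of a noisy 5 Hz sine sampled
# at 100 Hz with the psd() function defined above; the peak should sit near 5 Hz.
def _example_psd_usage():
    t = np.arange(0.0, 10.0, 0.01)            # 10 s sampled at Fs = 100 Hz
    x = np.sin(2*np.pi*5*t) + 0.1*np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=256, Fs=100)
    return Pxx, freqs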
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
    length segments and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[NFFT/2:]-Fs,freqs[:NFFT/2]))
Pxx = np.concatenate((Pxx[NFFT/2:,:],Pxx[:NFFT/2,:]),0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
      p2*x0^2 +  p1*x0 + p0 = y0
      p2*x1^2 +  p1*x1 + p0 = y1
      p2*x2^2 +  p1*x2 + p0 = y2
      .....
      p2*xk^2 +  p1*xk + p0 = yk
Method: if *X* is a the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
        p = (X_t X)^{-1} X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note that the *k*'s and *n*'s in the superscripts and
    subscripts on that page differ from the notation used here.  The
    linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
    warnings.warn("use numpy.polyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
*p* is a vector of polynomial coeffients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
    *i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
      the coherence or phase vectors for any (*i*, *j*) key.  E.g.,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*.  In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7GHZ Athlon with 512MB RAM compared with
    *preferSpeedOverMemory* = *False*.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
        windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
    normVal = _norm(windowVals)**2
    for iColNum, iCol in enumerate(allColumns):
        progressCallback(iColNum / Ncols, 'Caching FFTs')
        Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
        -\sum p_i \ln(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
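# Hedged sketch (editor-added, illustrative only): compare entropy() on
# Gaussian samples with the analytic value quoted in the docstring,
# 0.5*(1 + log(2*pi*sigma**2)); the two numbers should agree closely.
def _example_entropy_gaussian(mu=0.0, sigma=1.0, nsamples=200000, nbins=100):
    y = mu + sigma*np.random.randn(nsamples)
    S_empirical = entropy(y, nbins)
    S_analytic = 0.5*(1.0 + np.log(2*np.pi*sigma**2.0))
    return S_empirical, S_analytic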
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y)\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
    "Return the Levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
'x = np.linspace(minx, maxx, N), where N is even'
dx = x[1]-x[0]
    f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)
    ind = np.concatenate([np.arange(N/2, N, dtype=int),
                          np.arange(0, N/2, dtype=int)])
    df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
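# Hedged sketch (editor-added, illustrative only): the longest run of ones in
# this vector spans indices 3..5, so those indices are returned.
def _example_longest_contiguous_ones():
    x = np.array([1, 1, 0, 1, 1, 1, 0, 1])
    return longest_contiguous_ones(x)   # -> array([3, 4, 5])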
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
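# Hedged sketch (editor-added, illustrative only): run prepca() on random
# 3-variable data; fracVar gives the fraction of variance carried by each
# retained component and sums to roughly one when frac=0.
def _example_prepca():
    P = np.random.randn(3, 100)         # (numVars, numObs)
    Pcomponents, Trans, fracVar = prepca(P)
    return Pcomponents.shape, fracVar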
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
def prctile_rank(x, p):
"""
    Return the rank for each element in *x*; the ranks range from
    0 to len(*p*).  E.g., if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
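# Hedged sketch (editor-added, illustrative only): rank a small ramp against
# its quartile cutoffs; the result contains only the values 0, 1, 2 and 3.
def _example_prctile_rank():
    x = np.arange(10.0)
    return prctile_rank(x, (25, 50, 75))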
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
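# Hedged sketch (editor-added, illustrative only): evaluate the standard
# (unit-variance, uncorrelated) bivariate Gaussian on a grid; the maximum,
# 1/(2*pi) ~ 0.159, is attained at the origin.
def _example_bivariate_normal():
    X, Y = np.meshgrid(np.linspace(-3, 3, 61), np.linspace(-3, 3, 61))
    Z = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0, 0.0)
    return Z.max()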
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
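# Hedged sketch (editor-added, illustrative only): the two diagonals of the
# unit square cross, while two parallel horizontal segments do not.
def _example_segments_intersect():
    crossing = segments_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0)))
    parallel = segments_intersect(((0, 0), (1, 0)), ((0, 1), (1, 1)))
    return crossing, parallel           # -> (True, False)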
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
.. math::
        \lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming data.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
Update the *datalim* in the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
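# Hedged sketch (editor-added, illustrative only): a 3-point moving average;
# because mode='valid' is used, the output has len(x) - n + 1 entries.
def _example_movavg():
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    return movavg(x, 3)                 # -> array([ 2.,  3.,  4.])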
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
    The data must be regular, with the same number of values in every row.
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
    Given data vectors X and Y, SLOPES calculates Y'(X), i.e. the slope
    of the curve Y(X).  The slope is estimated using the slope obtained
    from that of a parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were not an academic journal, once in a while something serious
    and original comes in, adding that this was
    "apparently a real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
X is assumed to be sorted in increasing order
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation.  The relevance of the data obtained from this is, of
    course, questionable...
    original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
    completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
iterable. If they are iterable, they must be equal in length to x
return value is x, y arrays for use with Axes.fill
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
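# Hedged sketch (editor-added, illustrative only): build the closed polygon
# between a sine curve and the x-axis, in the form expected by Axes.fill.
def _example_poly_between():
    x = np.linspace(0, 2*np.pi, 50)
    xv, yv = poly_between(x, 0, np.sin(x))
    return xv, yv                       # each has length 2*len(x)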
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <[email protected]>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
        return np.exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin == None:
xfin = xini + 0.0
xini = 0.0
if delta == None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if number < base: \
return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
if not digits.count (1): return 0
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function, but it is guaranteed to return the correct
integer value if the input is an exact integer power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
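# Illustrative usage sketch (an addition for clarity): binary_repr(), log2()
# and ispower2() are related -- ispower2() returns the exponent when its
# argument is an exact power of two, and 0 otherwise. Note that binary_repr()
# relies on map() returning a list, i.e. the Python 2 semantics this module
# assumes throughout.
#
#     binary_repr(10)    # '1010'
#     ispower2(16)       # 4
#     ispower2(17)       # 0
#     log2(256)          # 8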
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
meaning it has at most one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except possibly one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - see :func:`scipy.linalg.sqrtm`
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
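# Illustrative usage sketch (an addition for clarity): appending a single
# field. When only one field is added, *names*, *arrs* and *dtypes* may be
# given as bare values rather than lists.
#
#     r = np.rec.fromrecords([(1, 2.0), (2, 3.5)], names='a,b')
#     r2 = rec_append_fields(r, 'flag', np.array([True, False]))
#     r2.dtype.names     # ('a', 'b', 'flag')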
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
The returned record array has *dtype* names for each attribute name in
the *groupby* argument, with the associated group values, and for each
*outname* in the *stats* argument, with the associated stat summary
output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
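# Illustrative usage sketch (an addition for clarity): grouping a record
# array by one key column and summarizing another. Each stats tuple is
# (attribute, function, output name).
#
#     r = np.rec.fromrecords([('a', 1.0), ('a', 3.0), ('b', 5.0)],
#                            names='key,val')
#     g = rec_groupby(r, ('key',),
#                     (('val', len, 'n'), ('val', np.mean, 'val_mean')))
#     g.dtype.names      # ('key', 'n', 'val_mean')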
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r2.dtype[name]
assert dt2.type == dt1.type
if dt1.itemsize > dt2.itemsize:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults en masse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
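# Illustrative usage sketch (an addition for clarity): joining two record
# arrays on a shared key column. With the default inner join only keys
# present in both arrays survive; an outer join keeps all keys, optionally
# filling missing columns from *defaults*.
#
#     r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,x')
#     r2 = np.rec.fromrecords([(1, 'a'), (3, 'c')], names='id,label')
#     inner = rec_join('id', r1, r2)                    # only id == 1
#     outer = rec_join('id', r1, r2, jointype='outer')  # ids 1, 2 and 3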
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
# objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
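# Illustrative usage sketch (an addition for clarity; the file name below is
# hypothetical): given a CSV file with a header row such as
#
#     date,station,temperature
#     2008-01-01,oslo,1.5
#     2008-01-02,oslo,-2.0
#
# the call
#
#     r = csv2rec('weather.csv')
#
# returns a record array whose column types were sniffed from the data, e.g.
# r.date holds datetime.date objects and r.temperature holds floats.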
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
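# Illustrative round-trip sketch (an addition for clarity; the file name is
# hypothetical): rec2csv() and csv2rec() are designed to round-trip a record
# array through a delimited text file.
#
#     rec2csv(r, 'snapshot.csv')
#     r_back = csv2rec('snapshot.csv')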
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside the convex
hull defined by the input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
(:mod:`mpl_toolkits.natgrid`) has been created that provides a more
robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError('output grid defined by xi,yi must be monotone increasing')
# allocate array for output (buffer will be overwritten by natgridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
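# Illustrative usage sketch (an addition for clarity): gridding scattered
# samples of z = f(x, y) onto a regular 50x50 grid. Whether the natgrid or
# the delaunay backend is used depends on what is installed, as described in
# the docstring above.
#
#     x = np.random.uniform(-2.0, 2.0, 200)
#     y = np.random.uniform(-2.0, 2.0, 200)
#     z = x * np.exp(-x ** 2 - y ** 2)
#     xi = np.linspace(-2.0, 2.0, 50)
#     yi = np.linspace(-2.0, 2.0, 50)
#     zi = griddata(x, y, z, xi, yi)    # shape (50, 50), masked outside hull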
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
:func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
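# Illustrative usage sketch (an addition for clarity): interpolating a small
# table at arbitrary points; out-of-range points yield nan unless
# extrap=True, which clamps to the end values.
#
#     x = [0.0, 1.0, 2.0]
#     y = [0.0, 10.0, 20.0]
#     less_simple_linear_interpolation(x, y, 0.5)          # array([ 5.])
#     less_simple_linear_interpolation(x, y, [0.5, 1.5])   # array([  5.,  15.])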
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were "not an academic journal but once in a while something
serious and original comes in", adding that this was "apparently a
real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
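# Illustrative usage sketch (an addition for clarity): building a polygon
# that shades the band between a constant lower bound and a curve, suitable
# for matplotlib.axes.Axes.fill.
#
#     x = np.linspace(0, 2 * np.pi, 100)
#     xs, ys = poly_between(x, -1.0, np.sin(x))
#     # ax.fill(xs, ys) would then shade between y = -1 and y = sin(x)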
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
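# Illustrative usage sketch (an addition for clarity): half-open
# (start, stop) index pairs for the runs of True values in a boolean mask.
#
#     mask = np.array([False, True, True, False, True])
#     contiguous_regions(mask)     # [(1, 3), (4, 5)]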
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distance between successive points in *N* dimensions,
where *X* is an *M* x *N* array or matrix: the distances between
successive rows are computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N* dimensions,
where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
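# Illustrative usage sketch (an addition for clarity): cumulative arc length
# along the unit square, traversed and closed back on itself.
#
#     square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])
#     path_length(square)          # array([ 0.,  1.,  2.,  3.,  4.])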
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
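# Illustrative usage sketch (an addition for clarity): converting a quadratic
# Bezier segment with control points (0, 0), (1, 2), (2, 0) into its exact
# cubic form; the end points are preserved and each inner control point lies
# 2/3 of the way from its end point towards the quadratic control point.
#
#     quad2cubic(0, 0, 1, 2, 2, 0)
#     # (0, 0, 0.666..., 1.333..., 1.333..., 1.333..., 2, 0)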
| gpl-3.0 |
florian-f/sklearn | sklearn/datasets/tests/test_base.py | 3 | 5839 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_filenames
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
def test_deprecated_load_filenames():
with warnings.catch_warnings(record=True):
# catch deprecation warning
res = load_filenames(LOAD_FILES_ROOT)
assert_true(res)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_charset():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, charset="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 14)
assert_true(res.DESCR)
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/model_selection/_search.py | 5 | 58290 | """
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
from __future__ import division
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, defaultdict, Sequence
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from scipy.stats import rankdata
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ._validation import _aggregate_score_dicts
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.fixes import MaskedArray
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..utils.deprecation import DeprecationDict
from ..metrics.scorer import _check_multimetric_scoring
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d. For exhaustive searches, use "
"GridSearchCV." % (grid_size, self.n_iter))
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None
The scorer callable object / function must have its signature as
``scorer(estimator, X, y)``.
If ``None`` the estimator's default scorer is used.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
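    Examples
    --------
    A minimal usage sketch; the estimator, the train/test split and the
    parameter values below are illustrative only:
    >>> import numpy as np
    >>> from sklearn import svm, datasets
    >>> from sklearn.metrics import make_scorer, accuracy_score
    >>> from sklearn.model_selection import fit_grid_point, train_test_split
    >>> iris = datasets.load_iris()
    >>> train, test = train_test_split(np.arange(len(iris.target)),
    ...                                random_state=0)
    >>> score, params, n_test = fit_grid_point(
    ...     iris.data, iris.target, svm.SVC(), {'C': 1.0}, train, test,
    ...     make_scorer(accuracy_score), verbose=0)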
"""
# NOTE we are not using the return value as the scorer by itself should be
# validated before. We use check_scoring only to reject multimetric scorer
check_scoring(estimator, scorer)
scores, n_samples_test = _fit_and_score(estimator, X, y,
scorer, train,
test, verbose, parameters,
fit_params=fit_params,
return_n_test_samples=True,
error_score=error_score)
return scores, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if (isinstance(v, six.string_types) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence(but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
# XXX Remove in 0.20
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore, we don't need any additional
    # slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise', return_train_score=True):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
return score(self.best_estimator_, X, y)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
                                 'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self, 'best_estimator_')
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt)
@property
def classes_(self):
self._check_is_fitted("classes_")
return self.best_estimator_.classes_
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
if self.fit_params is not None:
warnings.warn('"fit_params" as a constructor argument was '
'deprecated in version 0.19 and will be removed '
'in version 0.21. Pass fit parameters to the '
'"fit" method instead.', DeprecationWarning)
if fit_params:
warnings.warn('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.', RuntimeWarning)
else:
fit_params = self.fit_params
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, six.string_types) or
# This will work for both dict / list (tuple)
self.refit not in scorers):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key "
"to refit an estimator with the best "
"parameter setting on the whole data and "
"make the best_* attributes "
"available for that metric. If this is not "
"needed, refit should be set to False "
"explicitly. %r was passed." % self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
# Regenerate parameter iterable for each fit
candidate_params = list(self._get_param_iterator())
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, scorers, train,
test, self.verbose, parameters,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True, return_parameters=False,
error_score=self.error_score)
for parameters, (train, test) in product(candidate_params,
cv.split(X, y, groups)))
        # if one chooses to see train scores, "out" will contain train score info
if self.return_train_score:
(train_score_dicts, test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
else:
(test_score_dicts, test_sample_counts, fit_time,
score_time) = zip(*out)
        # test_score_dicts and train_score_dicts are lists of dictionaries and
# we make them into dict of lists
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
# TODO: replace by a dict in 0.21
results = (DeprecationDict() if self.return_train_score == 'warn'
else {})
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', fit_time)
_store('score_time', score_time)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
for scorer_name in scorers.keys():
# Computed the (weighted) mean and std for test scores alone
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True,
weights=test_sample_counts if self.iid else None)
if self.return_train_score:
prev_keys = set(results.keys())
_store('train_%s' % scorer_name, train_scores[scorer_name],
splits=True)
if self.return_train_score == 'warn':
for key in set(results.keys()) - prev_keys:
message = (
'You are accessing a training score ({!r}), '
'which will not be available by default '
'any more in 0.21. If you need training scores, '
'please set return_train_score=True').format(key)
# warn on key access
results.add_warning(key, message, FutureWarning)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
self.best_index_ = results["rank_test_%s" % refit_metric].argmin()
self.best_params_ = candidate_params[self.best_index_]
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
if self.refit:
self.best_estimator_ = clone(base_estimator).set_params(
**self.best_params_)
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
@property
def grid_scores_(self):
check_is_fitted(self, 'cv_results_')
if self.multimetric_:
raise AttributeError("grid_scores_ attribute is not available for"
" multi-metric evaluation.")
warnings.warn(
"The grid_scores_ attribute was deprecated in version 0.18"
" in favor of the more elaborate cv_results_ attribute."
" The grid_scores_ attribute will not be available from 0.20",
DeprecationWarning)
grid_scores = list()
for i, (params, mean, std) in enumerate(zip(
self.cv_results_['params'],
self.cv_results_['mean_test_score'],
self.cv_results_['std_test_score'])):
scores = np.array(list(self.cv_results_['split%d_test_score'
% s][i]
for s in range(self.n_splits_)),
dtype=np.float64)
grid_scores.append(_CVScoreTuple(params, mean, scores))
return grid_scores
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
fit_params : dict, optional
Parameters to pass to the fit method.
.. deprecated:: 0.19
``fit_params`` as a constructor argument was deprecated in version
0.19 and will be removed in version 0.21. Pass fit parameters to
the ``fit`` method instead.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, or string, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
        scorer that is used to find the best parameters for refitting the estimator
at the end.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
        ``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : boolean, optional
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Current default is ``'warn'``, which behaves as ``True`` in addition
to raising a warning when a training score is looked up.
That default will be changed to ``False`` in 0.21.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svc = svm.SVC()
>>> clf = GridSearchCV(svc, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params=None, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=..., return_train_score=...,
scoring=..., verbose=...)
>>> sorted(clf.cv_results_.keys())
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
['mean_fit_time', 'mean_score_time', 'mean_test_score',...
'mean_train_score', 'param_C', 'param_kernel', 'params',...
'rank_test_score', 'split0_test_score',...
'split0_train_score', 'split1_test_score', 'split1_train_score',...
'split2_test_score', 'split2_train_score',...
'std_fit_time', 'std_score_time', 'std_test_score', 'std_train_score'...]
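    A multi-metric search names its scorers explicitly and must set ``refit``
    to one of them when refitting is desired; the values below are
    illustrative only:
    >>> clf_multi = GridSearchCV(svc, parameters,
    ...                          scoring=['accuracy', 'precision_macro'],
    ...                          refit='accuracy')  # doctest: +SKIP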
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.8, 0.7, 0.8, 0.9],
'split1_test_score' : [0.82, 0.5, 0.7, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.82],
'std_test_score' : [0.02, 0.01, 0.03, 0.03],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is present only if ``refit`` is
specified.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
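    For example, a memory-conscious configuration could be set up as follows
    (the values shown are illustrative only):
    >>> clf = GridSearchCV(svc, parameters, n_jobs=4,
    ...                    pre_dispatch='2*n_jobs')  # doctest: +SKIP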
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise',
return_train_score="warn"):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _get_param_iterator(self):
"""Return ParameterGrid instance for the given param_grid"""
return ParameterGrid(self.param_grid)
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
fit_params : dict, optional
Parameters to pass to the fit method.
.. deprecated:: 0.19
``fit_params`` as a constructor argument was deprecated in version
0.19 and will be removed in version 0.21. Pass fit parameters to
the ``fit`` method instead.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    refit : boolean, or string, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``RandomizedSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
        ``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : boolean, optional
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Current default is ``'warn'``, which behaves as ``True`` in addition
to raising a warning when a training score is looked up.
That default will be changed to ``False`` in 0.21.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
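    Examples
    --------
    A minimal usage sketch; the estimator and the sampled distributions below
    are illustrative only:
    >>> from scipy.stats import expon
    >>> from sklearn import svm, datasets
    >>> from sklearn.model_selection import RandomizedSearchCV
    >>> iris = datasets.load_iris()
    >>> param_dist = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
    >>> search = RandomizedSearchCV(svm.SVC(), param_dist, n_iter=8,
    ...                             random_state=0)
    >>> search = search.fit(iris.data, iris.target)
    >>> search.best_params_  # doctest: +SKIP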
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | split0_test_score |...|rank_test_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.8 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.9 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.7 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'split0_test_score' : [0.8, 0.9, 0.7],
'split1_test_score' : [0.82, 0.5, 0.7],
'mean_test_score' : [0.81, 0.7, 0.7],
'std_test_score' : [0.02, 0.2, 0.],
'rank_test_score' : [3, 1, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise', return_train_score="warn"):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _get_param_iterator(self):
"""Return ParameterSampler instance for the given distributions"""
return ParameterSampler(
self.param_distributions, self.n_iter,
random_state=self.random_state)
| mit |
ajdawson/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 2 | 8607 | # (C) British Crown Copyright 2014, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from mock import Mock
from nose.tools import assert_equal, assert_raises_regexp
from matplotlib.axes import Axes
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with assert_raises_regexp(TypeError, message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with assert_raises_regexp(TypeError, message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with assert_raises_regexp(TypeError, message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with assert_raises_regexp(TypeError, message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert_equal(result, expected)
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert_equal(result, expected)
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert_equal(result, expected)
| lgpl-3.0 |
JT5D/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 7 | 2021 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print "best bandwidth: {0}".format(grid.best_estimator_.bandwidth)
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
arbuz001/sms-tools | workspace/A6/A6Part1.py | 1 | 5633 | import os
import sys
import numpy as np
import math
from scipy.signal import get_window
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import utilFunctions as UF
import harmonicModel as HM
import sineModel as SM
import stft
import dftModel as DFT
eps = np.finfo(float).eps
"""
A6Part1 - Estimate fundamental frequency in polyphonic audio signal
Set the analysis parameters used within the function estimateF0() to obtain a good estimate of the
fundamental frequency (f0) corresponding to one melody within a complex audio signal. The signal
is a cello recording cello-double-2.wav, in which two strings are played simultaneously. One string
plays a constant drone while the other string plays a simple melody. You have to choose the analysis
parameter values such that only the f0 frequency of the simple melody is tracked.
The input argument to the function is the wav file name including the path (inputFile). The function
returns a numpy array of the f0 frequency values for each audio frame. For this question we take
hopSize (H) = 256 samples.
estimateF0() calls f0Detection() function of the harmonicModel.py, which uses the two way mismatch
algorithm for f0 estimation.
estimateF0() also plots the f0 contour on top of the spectrogram of the audio signal for you to
visually analyse the performance of your chosen values for the analysis parameters. In this question
we will only focus on the time segment between 0.5 and 4 seconds. So, your analysis parameter values
should produce a good f0 contour in this time region.
In addition to plotting the f0 contour on the spectrogram, this function also synthesizes the f0
contour. You can also evaluate the performance of your chosen analysis parameter values by listening
to this synthesized wav file named 'synthF0Contour.wav'
Since there can be numerous combinations of the optimal analysis parameter values, the evaluation is
done solely on the basis of the output f0 sequence. Note that only the segment of the f0 contour
between 0.5 and 4 seconds is used to evaluate the performance of f0 estimation.
Your assignment will be tested only on inputFile = '../../sounds/cello-double-2.wav'. So choose the
analysis parameters using which the function estimates the f0 frequency contour corresponding to the
string playing simple melody and not the drone. There is no separate test case for this question.
You can keep working with the wav file mentioned above and when you think the performance is
satisfactory you can submit the assignment. The plots can help you achieve a good performance.
Be cautious while choosing the window size. Window size should be large enough to resolve the spectral
peaks and small enough to preserve the note transitions. Very large window sizes may smear the f0
contour at note transitions.
Depending on the parameters you choose and the capabilities of the hardware you use, the function
might take a while to run (even half a minute in some cases). For this part of the assignment please
refrain from posting your analysis parameters on the discussion forum.
"""
def estimateF0(inputFile = '../../sounds/cello-double-2.wav'):
"""
Function to estimate fundamental frequency (f0) in an audio signal. This function also plots the
f0 contour on the spectrogram and synthesize the f0 contour.
Input:
inputFile (string): wav file including the path
Output:
f0 (numpy array): array of the estimated fundamental frequency (f0) values
"""
    ### Analysis parameter values (the XX placeholders from the assignment template are filled in below)
window = 'blackman'
M = 6096
N = 4096*4
f0et = 5.0
t = -60
minf0 = 40
maxf0 = 215
### Do not modify the code below
H = 256 #fix hop size
fs, x = UF.wavread(inputFile) #reading inputFile
w = get_window(window, M) #obtaining analysis window
    ### Estimate f0 using the two-way mismatch based f0Detection
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) #estimating F0
    startFrame = int(np.floor(0.5*fs/H))  # cast to int so the values can be used as array indices
    endFrame = int(np.ceil(4.0*fs/H))
    f0[:startFrame] = 0
    f0[endFrame:] = 0
y = UF.sinewaveSynth(f0, 0.8, H, fs)
UF.wavwrite(y, fs, 'synthF0Contour.wav')
## Code for plotting the f0 contour on top of the spectrogram
# frequency range to plot
maxplotfreq = 500.0
fontSize = 16
plot = 1
fig = plt.figure()
ax = fig.add_subplot(111)
mX, pX = stft.stftAnal(x, fs, w, N, H) #using same params as used for analysis
mX = np.transpose(mX[:,:int(N*(maxplotfreq/fs))+1])
timeStamps = np.arange(mX.shape[1])*H/float(fs)
binFreqs = np.arange(mX.shape[0])*fs/float(N)
plt.pcolormesh(timeStamps, binFreqs, mX)
plt.plot(timeStamps, f0, color = 'k', linewidth=1.5)
plt.plot([0.5, 0.5], [0, maxplotfreq], color = 'b', linewidth=1.5)
plt.plot([4.0, 4.0], [0, maxplotfreq], color = 'b', linewidth=1.5)
plt.autoscale(tight=True)
plt.ylabel('Frequency (Hz)', fontsize = fontSize)
plt.xlabel('Time (s)', fontsize = fontSize)
plt.legend(('f0',))
xLim = ax.get_xlim()
yLim = ax.get_ylim()
ax.set_aspect((xLim[1]-xLim[0])/(2.0*(yLim[1]-yLim[0])))
    if plot == 1: # show the plot interactively
plt.autoscale(tight=True)
plt.show()
    else: # otherwise save the figure to disk
fig.tight_layout()
fig.savefig('f0_over_Spectrogram.png', dpi=150, bbox_inches='tight')
return f0
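# Usage sketch (an addition for illustration, not part of the original assignment
# file): runs the estimator on the default cello recording when the module is
# executed directly, assuming the sound file path used above is valid.
if __name__ == "__main__":
    f0 = estimateF0()
    voiced = f0[f0 > 0]  # frames where a fundamental was detected
    if voiced.size > 0:
        print("voiced frames: %d, f0 range: %.2f - %.2f Hz"
              % (voiced.size, voiced.min(), voiced.max()))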
| agpl-3.0 |
sarvex/tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2_test.py | 6 | 102159 | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import tempfile
from absl.testing import parameterized
import six
import tensorflow.compat.v1 as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
def get_symbol_for_name(root, name):
name_parts = six.ensure_str(name).split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
def get_args(symbol):
if hasattr(inspect, "signature"):
signature = inspect.signature(symbol)
# Ignore *args and **kwargs for now.
return [param.name for param in signature.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
return tf_inspect.getargspec(symbol)[0]
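# Illustrative note (added comment, not part of the original test): for a function
# defined as `def f(a, b, *args, **kwargs)`, get_args is expected to return
# ["a", "b"], since only positional-or-keyword parameters are kept.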
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
"""
open_paren_index = six.ensure_str(call_str).find("(")
close_paren_index = call_str.rfind(")")
function_name = call_str[:six.ensure_str(call_str).find("(")]
args = six.ensure_str(call_str[open_paren_index +
1:close_paren_index]).split(",")
args = [six.ensure_str(arg).split("=")[0].strip() for arg in args]
args = [arg for arg in args if arg] # filter out empty strings
return function_name, args
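# Illustrative note (added comment, not part of the original test): for the call
# string "tf.nn.dropout(x, keep_prob=0.5, name='d')" the helper above is expected
# to return ("tf.nn.dropout", ["x", "keep_prob", "name"]).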
class TestUpgrade(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
super(TestUpgrade, cls).setUpClass()
cls.v2_symbols = {}
cls.v1_symbols = {}
if hasattr(tf.compat, "v2"):
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = tf_export.get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + six.ensure_str(name)] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v2, visitor)
if hasattr(tf.compat, "v1"):
def symbol_collector_v1(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = tf_export.get_v1_names(attr)
for name in api_names_v1:
cls.v1_symbols["tf." + six.ensure_str(name)] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector_v1)
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def _upgrade(self,
old_file_text,
import_rename=False,
upgrade_compat_v1_import=False):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(
tf_upgrade_v2.TFAPIChangeSpec(
import_rename, upgrade_compat_v1_import=upgrade_compat_v1_import))
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
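  # Note (added comment, not part of the original test): the tests below typically
  # call this helper as
  #   _, report, errors, new_text = self._upgrade("tf.conj(a)\n")
  # and then assert on the rewritten text, the report entries, or the errors.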
def _upgrade_multiple(self, old_file_texts):
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
results = []
for old_file_text in old_file_texts:
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
results.append([count, report, errors, out_file.getvalue()])
return results
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertNotEqual(six.ensure_str(report).find("Failed to parse"), -1)
def testReport(self):
text = "tf.angle(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
    # This is not a complete test, but it is a sanity check that the report
    # contains information.
self.assertTrue(
six.ensure_str(report).find("Renamed function `tf.angle` to "
"`tf.math.angle`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + six.ensure_str(name))
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
text not in self.v2_symbols and
# Ignore any symbol that contains __internal__
"__internal__" not in text and
            # Builds currently install an old version of estimator that doesn't
# have some 2.0 symbols.
not text.startswith("tf.estimator")):
self.assertFalse(
True, "Symbol %s generated from %s not in v2 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = tf_export.get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + six.ensure_str(name))
else:
_, _, _, text = self._upgrade("tf." + six.ensure_str(name))
if (text and
not text.startswith("tf.compat.v1") and
not text.startswith("tf.compat.v2") and
not text.startswith("tf.estimator") and
text not in v1_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v1 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testV1KeywordArgNames(self):
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that verifies V1 argument names.
def arg_test_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = tf_export.get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
visitor = public_api.PublicAPIVisitor(arg_test_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testV2KeywordArgNames(self):
# This test converts a call of the form:
# tf.foo(arg1=0, arg2=1, ...)
# to 2.0. Then, checks that converted function has valid argument names.
if not hasattr(tf.compat, "v2"):
return
v2_arg_exceptions = {
"verify_shape_is_now_always_true",
# These arguments should not be used, they just specify
# that a function takes named arguments.
"keyword_required",
"_sentinel",
}
v1_name_exceptions = {
"tf.print", # requires print_function import
}
function_warnings = (
tf_upgrade_v2.TFAPIChangeSpec().function_warnings)
function_transformers = (
tf_upgrade_v2.TFAPIChangeSpec().function_transformers)
keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
# Visitor that converts to V2 and checks V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
if not tf_inspect.isfunction(attr):
continue
names_v1 = tf_export.get_v1_names(attr)
arg_names_v1 = get_args(attr)
for name in names_v1:
tf_name = "tf.%s" % name
if tf_name in function_warnings or tf_name in function_transformers:
continue # These require manual change
if tf_name in v1_name_exceptions:
continue
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(arg_names_v1)])
text_input = "%s(%s)" % (tf_name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
if "__internal__" in new_function_name:
# Skip the tf.__internal__ and tf.keras.__internal__ API.
continue
if new_function_name == "tf.compat.v1.%s" % name:
if tf_name in keyword_renames:
# If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.fail(
"Function '%s' is not in 2.0 when converting\n%s\nto\n%s" %
(new_function_name, text_input, text))
continue
if new_function_name.startswith("tf.compat.v2"):
self.assertIn(new_function_name.replace("tf.compat.v2.", "tf."),
self.v2_symbols)
continue
# 3. Verify V2 function and arguments.
args_v2 = get_args(self.v2_symbols[new_function_name])
args_v2.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v2,
"Invalid argument '%s' in 2.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v2)))
# 4. Verify that the argument exists in v1 as well.
if new_function_name in set(["tf.nn.ctc_loss",
"tf.saved_model.save"]):
continue
args_v1 = get_args(self.v1_symbols[new_function_name])
args_v1.extend(v2_arg_exceptions)
for new_arg in new_args:
self.assertIn(
new_arg, args_v1,
"Invalid argument '%s' in 1.0 when converting\n%s\nto\n%s.\n"
"Supported arguments: %s" % (
new_arg, text_input, text, str(args_v1)))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testPositionsMatchArgGiven(self):
full_dict = tf_upgrade_v2.TFAPIChangeSpec().function_arg_warnings
method_names = list(full_dict.keys())
for method_name in method_names:
args = list(full_dict[method_name].keys())
if "contrib" in method_name:
# Skip descending and fetching contrib methods during test. These are
# not available in the repo anymore.
continue
elif six.ensure_str(method_name).startswith("*."):
# special case for optimizer methods
method = six.ensure_str(method_name).replace("*", "tf.train.Optimizer")
else:
method = method_name
method = get_symbol_for_name(tf, method)
arg_spec = tf_inspect.getfullargspec(method)
for (arg, pos) in args:
# to deal with the self argument on methods on objects
if six.ensure_str(method_name).startswith("*."):
pos += 1
self.assertEqual(arg_spec[0][pos], arg)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
manual_function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().manual_function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
if name in manual_function_reorders:
continue
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = tf_export.get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay",
"tf.train.piecewise_constant_decay",
]:
text = "%s(a, b)\n" % decay
_, report, unused_errors, _ = self._upgrade(text)
self.assertIn("switch to the schedules in "
"`tf.keras.optimizers.schedules`", report)
def verify_compat_v1_rename_correctness(self, values, ns_prefix=""):
if ns_prefix:
ns_prefix += "."
for v in values:
text = "tf." + ns_prefix + v + "(a, b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
def testInitializers(self):
initializers = [
"zeros",
"ones",
"constant",
"random_uniform",
"random_normal",
"truncated_normal",
"variance_scaling",
"orthogonal",
"glorot_uniform",
"glorot_normal",
"identity",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="initializers")
initializers = [
"zeros_initializer",
"ones_initializer",
"constant_initializer",
"random_uniform_initializer",
"random_normal_initializer",
"truncated_normal_initializer",
"variance_scaling_initializer",
"orthogonal_initializer",
"glorot_uniform_initializer",
"glorot_normal_initializer",
]
self.verify_compat_v1_rename_correctness(initializers)
initializers = [
"zeros",
"ones",
"Ones",
"Zeros",
"constant",
"Constant",
"VarianceScaling",
"Orthogonal",
"orthogonal",
"Identity",
"identity",
"glorot_uniform",
"glorot_normal",
"lecun_normal",
"lecun_uniform",
"he_normal",
"he_uniform",
"TruncatedNormal",
"truncated_normal",
"RandomUniform",
"uniform",
"random_uniform",
"RandomNormal",
"normal",
"random_normal",
]
self.verify_compat_v1_rename_correctness(
initializers, ns_prefix="keras.initializers")
def testContribXavierInitializer(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
text = contrib_alias + "layers.xavier_initializer()\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=\"uniform\")\n",
)
text = "slim.xavier_initializer(True or False)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = "slim.xavier_initializer(uniform=(True or False))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if True or False else "
"\"truncated_normal\"))\n",
)
text = contrib_alias + "layers.xavier_initializer_conv2d(False, 12)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12)\n",
)
text = (contrib_alias + "layers.xavier_initializer_conv2d("
"False, 12, tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtype=tf.float32)\n",
)
text = (contrib_alias + "layers.xavier_initializer("
"False, 12, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, "
"mode=\"fan_avg\", "
"distribution=(\"uniform\" if False else \"truncated_normal\"), "
"seed=12, "
"dtypes=tf.float32)\n",
)
def testVarianceScalingInitializer(self):
text = ("tf.contrib.layers.variance_scaling_initializer("
"mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = ("slim.variance_scaling_initializer("
"uniform=(True or False), mode=(\"FAN\" + \"_AVG\"))\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=2.0, "
"distribution=(\"uniform\" if True or False else \"truncated_normal\"),"
" mode=(\"FAN\" + \"_AVG\").lower())\n",
)
text = "tf.contrib.layers.variance_scaling_initializer(factor=1.0)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0)\n",
)
text = ("tf.contrib.layers.variance_scaling_initializer("
"12.0, \"FAN_AVG\", True, dtypes=tf.float32)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.compat.v1.keras.initializers.VarianceScaling(12.0, "
"(\"FAN_AVG\").lower(), "
"(\"uniform\" if True else \"truncated_normal\"), "
"dtypes=tf.float32)\n",
)
def testMetrics(self):
metrics = [
"accuracy",
"auc",
"average_precision_at_k",
"false_negatives",
"false_negatives_at_thresholds",
"false_positives",
"false_positives_at_thresholds",
"mean",
"mean_absolute_error",
"mean_cosine_distance",
"mean_iou",
"mean_per_class_accuracy",
"mean_relative_error",
"mean_squared_error",
"mean_tensor",
"percentage_below",
"precision",
"precision_at_k",
"precision_at_thresholds",
"precision_at_top_k",
"recall",
"recall_at_k",
"recall_at_thresholds",
"recall_at_top_k",
"root_mean_squared_error",
"sensitivity_at_specificity",
"sparse_average_precision_at_k",
"sparse_precision_at_k",
"specificity_at_sensitivity",
"true_negatives",
"true_negatives_at_thresholds",
"true_positives",
"true_positives_at_thresholds",
]
for m in metrics:
text = "tf.metrics." + m + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.metrics." + m + "(a, b)", new_text)
self.assertIn(
"tf.metrics have been replaced with object oriented versions", report)
def testLosses(self):
losses = [
"absolute_difference",
"add_loss",
"compute_weighted_loss",
"cosine_distance",
"get_losses",
"get_regularization_loss",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"huber_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy",
]
for l in losses:
text = "tf.losses." + l + "(a, b)"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.compat.v1.losses." + l + "(a, b)", new_text)
self.assertIn(
"tf.losses have been replaced with object oriented versions", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "()"
expected_text = ns + "(loss_reduction=tf.keras.losses.Reduction.SUM)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ns + "(loss_reduction=TEST)"
expected_text = ns + "(loss_reduction=TEST)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "tf.estimator.BaselineClassifier(m, c, w, v, o, c, lr)"
expected_text = (
"tf.compat.v1.estimator.BaselineClassifier("
"model_dir=m, n_classes=c, weight_column=w, label_vocabulary=v, "
"optimizer=o, config=c, loss_reduction=lr)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.estimator.BaselineClassifier(model_dir=model_dir)"
expected_text = ("tf.estimator.BaselineClassifier(" +
"model_dir=model_dir, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testBaseEstimatorPartitioner(self):
classes = ["LinearEstimator", "DNNLinearCombinedEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitioner(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorOptimizer(self):
classes = ["BaselineEstimator", "LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorOptimizer(self):
classes = [
"BaselineClassifier", "BaselineRegressor", "LinearClassifier",
"LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(optimizer=TEST)"
text = ns + suffix
suffix = ("(optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(dnn_optimizer=TEST, linear_optimizer=Test)"
text = ns + suffix
suffix = ("(dnn_optimizer=TEST, linear_optimizer=Test, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBaseEstimatorPartitionerAndOptimizer(self):
classes = ["LinearEstimator", "DNNEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedEstimatorPartitionerAndOptimizer(self):
classes = ["DNNLinearCombinedEstimator"]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testCannedEstimatorPartitionerAndOptimizer(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNRegressor", "DNNClassifier"
]
for c in classes:
ns = "tf.estimator." + c
suffix = "(input_layer_partitioner=TEST, optimizer=TEST)"
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDNNLinearCombinedPartitionerAndOptimizer(self):
classes = [
"DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor",
]
for c in classes:
ns = "tf.estimator." + c
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST)")
text = ns + suffix
suffix = ("(input_layer_partitioner=TEST, dnn_optimizer=TEST, "
"linear_optimizer=TEST, "
"loss_reduction=tf.keras.losses.Reduction.SUM)")
expected_text = "tf.compat.v1.estimator." + c + suffix
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractGlimpse(self):
text = ("tf.image.extract_glimpse(x, size, off, False, "
"False, False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, False, "
"False, 'uniform' if (False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, uniform_noise=True if uniform_noise else "
"False, name=\"foo\")\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.image.extract_glimpse(x, size, off, centered=False, "
"normalized=False, noise='uniform' if (True if uniform_noise else "
"False) else 'gaussian', name=\"foo\")\n",
)
text = ("tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" uniform_noise=False,\n"
" name=\"foo\")# Stuff after\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.image.extract_glimpse(x,\n"
" size,\n"
" off,\n"
" centered=True,\n"
" normalized=True, # Stuff before\n"
" noise='uniform' if (False) else 'gaussian',\n"
" name=\"foo\")# Stuff after\n")
text = "tf.image.extract_glimpse(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertEqual(errors, [])
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (keep_prob), name=\"foo\")\n",
)
text = "tf.nn.dropout(x, keep_prob=.4, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (.4), name=\"foo\")\n",
)
text = (
"tf.nn.dropout(x, # Stuff before\n"
" keep_prob=.4, # Stuff after\n"
" name=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, # Stuff before\n"
" rate=1 - (.4), # Stuff after\n"
" name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertIn("tf.nn.dropout called without arguments", errors[0])
def testDropoutExpr(self):
text = "tf.nn.dropout(x, 1 - func(3 + 4.), name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, rate=1 - (1 - func(3 + 4.)), name=\"foo\")\n",
)
def testContribL1(self):
text = "tf.contrib.layers.l1_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l1_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1(scale)\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l1_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l1( # Stuff before\n"
" l=.4)\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2(self):
text = "tf.contrib.layers.l2_regularizer(scale)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertNotIn("Dropping scope", unused_report)
text = "tf.contrib.layers.l2_regularizer(scale, scope)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (scale))\n",
)
self.assertIn("Dropping scope", unused_report)
text = (
"slim.l2_regularizer( # Stuff before\n"
" scale=.4,"
" scope=\"foo\")\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2( # Stuff before\n"
" l=0.5 * (.4))\n",
)
self.assertIn("Dropping scope", unused_report)
def testContribL2Expr(self):
text = "tf.contrib.layers.l2_regularizer(1 - func(3 + 4.), scope=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.keras.regularizers.l2(0.5 * (1 - func(3 + 4.)))\n",
)
def testMathCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testCountNonZeroChanges(self):
text = (
"tf.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomPoissonConversion(self):
text1 = "tf.random_poisson(lam, shape, dtype)"
text2 = "tf.random.poisson(lam, shape, dtype)"
expected_text = "tf.random.poisson(lam=lam, shape=shape, dtype=dtype)"
_, unused_report, unused_errors, new_text1 = self._upgrade(text1)
self.assertEqual(new_text1, expected_text)
_, unused_report, unused_errors, new_text2 = self._upgrade(text2)
self.assertEqual(new_text2, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def test_substr(self):
text = "tf.substr(input, pos, len, name, unit)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual("tf.strings.substr(input=input, pos=pos, len=len, "
"name=name, unit=unit)\n", new_text)
self.assertEqual(errors, [])
def testColocateGradientsWithOps(self):
text = "tf.gradients(yx=a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(yx=a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.gradients(yx=a)\n", new_text)
self.assertIn("tf.gradients no longer takes", report)
text = "tf.gradients(y, x, grad_ys, name, colocate, gate)\n"
expected = ("tf.gradients(ys=y, xs=x, grad_ys=grad_ys, name=name, "
"gate_gradients=gate)\n")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def testColocateGradientsWithOpsMinimize(self):
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.minimize(a)\n", new_text)
self.assertIn("Optimizer.minimize no longer takes", report)
def testColocateGradientsWithOpsComputeGradients(self):
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("optimizer.compute_gradients(a)\n", new_text)
self.assertIn("Optimizer.compute_gradients no longer takes", report)
def testColocateGradientsWithHessians(self):
text = "tf.hessians(ys=a, xs=b, colocate_gradients_with_ops=False)\n"
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual("tf.hessians(ys=a, xs=b)\n", new_text)
self.assertIn("tf.hessians no longer takes", report)
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testAutograph(self):
text = "tf.autograph.to_graph(f, True, arg_values=None, arg_types=None)"
expected_text = "tf.autograph.to_graph(f, True)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.autograph.to_code"
"(f, False, arg_values=None, arg_types=None, indentation=' ')")
expected_text = "tf.autograph.to_code(f, False)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEstimatorInputs(self):
text = "tf.estimator.inputs.numpy_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.numpy_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.estimator.inputs.pandas_input_fn(0)"
expected_text = "tf.compat.v1.estimator.inputs.pandas_input_fn(0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testKerasSavedModel(self):
text = (
"tf.contrib.saved_model.save_keras_model(model, './saved_models')\n"
"tf.contrib.saved_model.load_keras_model(saved_model_path)\n")
expected_text = (
"tf.compat.v1.keras.experimental.export_saved_model(model, "
"'./saved_models')\ntf.compat.v1.keras.experimental."
"load_from_saved_model(saved_model_path)\n"
)
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
expected_info = "Please use model.save"
self.assertIn(expected_info, report)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = (
"tf.nn.softmax_cross_entropy_with_logits_v2("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, axis=2)")
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertFalse(errors)
def testSoftMaxCrossEntropyWithLogits(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=labels, logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo(bar))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSoftMaxCrossEntropyWithLogitsDoesntNest(self):
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, dim=2)")
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(labels), logits=logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo(bar)))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=foo().zz())")
expected_text = ("tf.nn.softmax_cross_entropy_with_logits("
"labels=tf.stop_gradient(foo().zz()))")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keepdims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dims=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2D(self):
text = (
"tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu, "
"data_format)")
expected_text = (
"tf.nn.conv2d(input=input, filters=filter, strides=strides, "
"padding=padding, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.nn.conv2d(input, filter=filter, strides=strides, padding=padding, "
"use_cudnn_on_gpu=use_cudnn_on_gpu)")
expected_text = ("tf.nn.conv2d(input=input, filters=filter, "
"strides=strides, padding=padding)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropFilter(self):
text = (
"tf.nn.conv2d_backprop_filter(input, filter_sizes, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.compat.v1.nn.conv2d_backprop_filter(input, filter_sizes, "
"out_backprop, strides, padding, use_cudnn_on_gpu, data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testConv2DBackpropInput(self):
text = (
"tf.nn.conv2d_backprop_input(input_sizes, filter, out_backprop, "
"strides, padding, use_cudnn_on_gpu, data_format)")
expected_text = (
"tf.nn.conv2d_transpose(output_shape=input_sizes, filters=filter, "
"input=out_backprop, strides=strides, padding=padding, "
"data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_shape=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPrint(self):
# tf.print() cannot be parsed unless we import print_function
text = """from __future__ import print_function
tf.print()
tf.print('abc')
"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text) # Text should stay the same
def testSparseSplit(self):
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, axis=axis, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse_split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, split_dim=axis)")
expected_text = (
"tf.sparse.split(sp_input=sp_input, num_split=num_split, "
"name=name, axis=axis)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testIterators(self):
for (text, expected) in [
("(expr + yielding(data)).make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator((expr + yielding(data)))"),
("dataset.make_one_shot_iterator()",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("dataset.make_one_shot_iterator(shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("dataset.make_one_shot_iterator(x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("dataset.make_initializable_iterator()",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("ds.make_initializable_iterator(shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("dataset.make_initializable_iterator(x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset)",
"tf.compat.v1.data.make_one_shot_iterator(dataset)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, shared_name=foo)"),
("tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_one_shot_iterator(dataset, x, y, z)"),
("tf.compat.v1.data.make_initializable_iterator(dataset)",
"tf.compat.v1.data.make_initializable_iterator(dataset)"),
("tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)",
"tf.compat.v1.data.make_initializable_iterator(ds, shared_name=foo)"),
("tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)",
"tf.compat.v1.data.make_initializable_iterator(dataset, x, y, z)")]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testStructure(self):
for (text, expected) in [
("tf.data.experimental.DatasetStructure", "tf.data.DatasetSpec"),
("tf.data.experimental.OptionalStructure", "tf.OptionalSpec"),
("tf.data.experimental.RaggedTensorStructure", "tf.RaggedTensorSpec"),
("tf.data.experimental.SparseTensorStructure", "tf.SparseTensorSpec"),
("tf.data.experimental.Structure", "tf.TypeSpec"),
("tf.data.experimental.TensorArrayStructure", "tf.TensorArraySpec"),
("tf.data.experimental.TensorStructure", "tf.TensorSpec"),
]:
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testMapAndBatch(self):
suffix = ".data.experimental.map_and_batch_with_legacy_function(args)"
text = "tf" + suffix
expected = "tf.compat.v1" + suffix
_, unused_report, unused_errors, actual = self._upgrade(text)
self.assertEqual(actual, expected)
def testCast(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, name='test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testCastPositionalSecondArgument(self):
for (name, dtype) in [("int32", "int32"),
("int64", "int64"),
("float", "float32"),
("double", "float64"),
("complex64", "complex64"),
("complex128", "complex128"),
("bfloat16", "bfloat16")]:
text = "tf.to_%s(x, 'test')" % name
expected_text = "tf.cast(x, name='test', dtype=tf.%s)" % dtype
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResize(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s)" % method
expected_text = ("tf.image.resize(i, s, "
"method=tf.image.ResizeMethod.%s)" % method.upper())
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testImageResizeExtraPositionalArgs(self):
for method in ["bilinear", "area", "bicubic", "nearest_neighbor"]:
text = "tf.image.resize_%s(i, s, a, p)" % method
expected_text = [
"tf.image.resize(i, s, ", "preserve_aspect_ratio=p, ",
"method=tf.image.ResizeMethod.%s)" % method.upper()
]
_, unused_report, unused_errors, new_text = self._upgrade(text)
for s in expected_text:
self.assertIn(s, new_text)
def testCond(self):
text = "tf.cond(a, b, c, True)"
expected_text = "tf.cond(pred=a, true_fn=b, false_fn=c)"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("tf.cond", errors[0])
self.assertIn("requires manual check", errors[0])
def testParens(self):
text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
expected_text = """
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits + self.distribution.log_prob(
x[..., tf.newaxis])),
axis=-1)"""
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testAssertStatements(self):
for name in [
"assert_greater", "assert_equal", "assert_none_equal", "assert_less",
"assert_negative", "assert_positive", "assert_non_negative",
"assert_non_positive", "assert_near", "assert_less",
"assert_less_equal", "assert_greater", "assert_greater_equal",
"assert_scalar"
]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def testAssertRankStatements(self):
for name in ["assert_rank", "assert_rank_at_least", "assert_rank_in"]:
text = "tf.%s(a)" % name
expected_text = "tf.compat.v1.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
text = "tf.debugging.%s(a)" % name
expected_text = "tf.compat.v1.debugging.%s(a)" % name
_, report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("%s has been" % name, report)
def test_assert_equal_graph_def(self):
text = ("tf.test.assert_equal_graph_def(a, b, checkpoint_v2=x, "
"hash_table_shared_name=y)")
expected = "tf.test.assert_equal_graph_def(actual=a, expected=b)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_upgrade(self):
text = "tf.contrib.framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_is_tensor_direct_import_upgrade(self):
text = "contrib_framework.is_tensor(x)"
expected = "tf.is_tensor(x)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_CriticalSection_upgrade(self):
text = "tf.contrib.framework.CriticalSection(shared_name='blah')"
expected = "tf.CriticalSection(shared_name='blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_sample_distorted_bounding_box(self):
# pylint: disable=line-too-long
text = "tf.image.sample_distorted_bounding_box(a, b, c, d, e, f, g, h, i, j)"
expected = "tf.image.sample_distorted_bounding_box(image_size=a, bounding_boxes=b, seed=c, min_object_covered=e, aspect_ratio_range=f, area_range=g, max_attempts=h, use_image_if_no_bounding_boxes=i, name=j)"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_initialize(self):
text = "tf.contrib.summary.initialize"
expected = "tf.compat.v1.summary.initialize"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_framework_argsort(self):
text = "tf.contrib.framework.argsort"
expected = "tf.argsort"
# pylint: enable=line-too-long
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_flags_bare(self):
_, _, errors, _ = self._upgrade("tf.flags")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_flags_flags(self):
_, _, errors, _ = self._upgrade("tf.flags.FLAGS")
self.assertIn("tf.flags and tf.app.flags have been removed", errors[0])
def test_contrib_estimator_head_deprecation(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
api_symbols = ["binary_classification_head", "logistic_regression_head",
"multi_class_head", "multi_head", "multi_label_head",
"poisson_regression_head", "regression_head"]
for symbol in api_symbols:
text = contrib_alias + "estimator." + symbol
_, report, _, _ = self._upgrade(text)
self.assertIn("`tf.contrib.estimator.*_head` has been deprecated",
report)
def test_contrib_layers_layer_norm_deprecation(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
_, report, _, _ = self._upgrade(contrib_alias + "layers.layer_norm")
self.assertIn(
"`tf.contrib.layers.layer_norm` has been deprecated", report)
def test_contrib_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.rnn")
self.assertIn("tf.contrib.rnn.* has been deprecated", report)
def test_contrib_cudnn_rnn_deprecation(self):
_, report, _, _ = self._upgrade("tf.contrib.cudnn_rnn")
self.assertIn("tf.contrib.cudnn_rnn.* has been deprecated", report)
def test_max_pool_2d(self):
text = "tf.nn.max_pool(value=4)"
expected_text = "tf.nn.max_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_estimator_early_stopping(self):
for contrib_alias in ["tf.contrib.", "contrib_"]:
api_symbols = [
"make_early_stopping_hook", "stop_if_higher_hook",
"stop_if_lower_hook",
"stop_if_no_decrease_hook", "stop_if_no_increase_hook"
]
for symbol in api_symbols:
text = contrib_alias + "estimator." + symbol
expected_text = "tf.estimator.experimental." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_cell(self):
api_symbols = ["RNNCell", "BasicLSTMCell", "BasicRNNCell", "GRUCell",
"LSTMCell", "MultiRNNCell"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn.rnn_cell." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_rnn_function(self):
api_symbols = ["static_rnn", "static_state_saving_rnn",
"static_bidirectional_rnn"]
for symbol in api_symbols:
text = "tf.contrib.rnn." + symbol
expected_text = "tf.compat.v1.nn." + symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_summary_generic(self):
text = "tf.contrib.summary.generic('foo', myval, meta, 'fam', 42)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"metadata=meta, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
# Arg errors come in alphabetical order of arguments, not appearance order.
self.assertIn("'family' argument", errors[0])
self.assertIn("'name' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio(self):
text = "tf.contrib.summary.audio('foo', myval, 44100, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram(self):
text = "tf.contrib.summary.histogram('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image(self):
text = "tf.contrib.summary.image('foo', myval, red, 3, 'fam', 42)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"max_outputs=3, step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'bad_color' argument", errors[0])
self.assertIn("'family' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_scalar(self):
text = "tf.contrib.summary.scalar('foo', myval, 'fam', 42)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=42)")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'family' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_generic_nostep(self):
text = "tf.contrib.summary.generic('foo', myval)"
expected = ("tf.compat.v2.summary.write(tag='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("'step' argument", errors[1])
self.assertIn("tf.compat.v2.summary.*", errors[2])
def test_contrib_summary_audio_nostep(self):
text = "tf.contrib.summary.audio('foo', myval, 44100)"
expected = ("tf.compat.v2.summary.audio(name='foo', data=myval, "
"sample_rate=44100, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_histogram_nostep(self):
text = "tf.contrib.summary.histogram('foo', myval)"
expected = ("tf.compat.v2.summary.histogram(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_image_nostep(self):
text = "tf.contrib.summary.image('foo', myval)"
expected = ("tf.compat.v2.summary.image(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_scalar_nostep(self):
text = "tf.contrib.summary.scalar('foo', myval)"
expected = ("tf.compat.v2.summary.scalar(name='foo', data=myval, "
"step=tf.compat.v1.train.get_or_create_global_step())")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'step' argument", errors[0])
self.assertIn("tf.compat.v2.summary.*", errors[1])
def test_contrib_summary_graph(self):
text = "tf.contrib.summary.graph(my_graph)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.trace"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_import_event(self):
text = "tf.contrib.summary.import_event(my_event)"
_, _, errors, _ = self._upgrade(text)
expected_error = "tf.compat.v2.summary.experimental.write_raw_pb"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_flush(self):
text = "tf.contrib.summary.flush(writer=foo)"
expected = "tf.compat.v2.summary.flush(writer=foo)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_create_file_writer(self):
text = ("tf.contrib.summary.create_file_writer('my_logdir', 0, 1000, "
"'.foo', 'shared-name')")
expected = ("tf.compat.v2.summary.create_file_writer(logdir='my_logdir', "
"max_queue=0, flush_millis=1000, filename_suffix='.foo')")
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("'name' argument", errors[0])
self.assertIn("no longer re-uses existing event files", errors[1])
def test_contrib_summary_always_record_summaries(self):
text = "tf.contrib.summary.always_record_summaries()"
expected = "tf.compat.v2.summary.record_if(True)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_never_record_summaries(self):
text = "tf.contrib.summary.never_record_summaries()"
expected = "tf.compat.v2.summary.record_if(False)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_record_summaries_every_n_global_steps(self):
text = "tf.contrib.summary.record_summaries_every_n_global_steps(10)"
_, _, errors, _ = self._upgrade(text)
expected_error = "replaced by a call to tf.compat.v2.summary.record_if()"
self.assertIn(expected_error, errors[0])
def test_contrib_summary_all_summary_ops(self):
text = "tf.contrib.summary.all_summary_ops()"
expected = "tf.compat.v1.summary.all_v2_summary_ops()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_contrib_summary_full_example(self):
deindent = lambda n, s: "\n".join(line[n:] for line in s.split("\n"))
text = deindent(4, """
import tensorflow as tf
tf.enable_eager_execution()
writer = tf.contrib.summary.create_file_writer(
"/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", 0.42)
tf.contrib.summary.histogram("weights", [1.0, 2.0], step=7)
tf.contrib.summary.flush()
""")
expected = deindent(4, """
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
writer = tf.compat.v2.summary.create_file_writer(
logdir="/tmp/migration_test", flush_millis=1000)
with writer.as_default(), tf.compat.v2.summary.record_if(True):
tf.compat.v2.summary.scalar(name="loss", data=0.42, step=tf.compat.v1.train.get_or_create_global_step())
tf.compat.v2.summary.histogram(name="weights", data=[1.0, 2.0], step=7)
tf.compat.v2.summary.flush()
""")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_summary_api_warning(self):
text = "tf.summary.scalar('foo', 42)"
_, report, _, _ = self._upgrade(text)
expected_info = "TF 1.x summary API cannot be automatically migrated"
self.assertIn(expected_info, report)
def test_avg_pool_2d(self):
text = "tf.nn.avg_pool(value=4)"
expected_text = "tf.nn.avg_pool2d(input=4)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_saved_model_load(self):
text = "tf.saved_model.load(sess, ['foo_graph'])"
expected = "tf.compat.v1.saved_model.load(sess, ['foo_graph'])"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_saved_model_load_v2(self):
text = "tf.saved_model.load_v2('/tmp/blah')"
expected = "tf.compat.v2.saved_model.load('/tmp/blah')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_app_flags(self):
text = "flags = tf.app.flags"
expected = "flags = tf.compat.v1.app.flags"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_uniform_unit_scaling_initializer(self):
text = "tf.uniform_unit_scaling_initializer(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.initializers.uniform_unit_scaling(0.5)"
expected_text = ("tf.compat.v1.keras.initializers.VarianceScaling("
"scale=0.5, distribution=\"uniform\")")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_name_scope(self):
text = "tf.name_scope(None, default_name, [some, values])"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(default_name=default_name, values=stuff)"
expected_text = "tf.name_scope(name=default_name)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.name_scope(name=n, default_name=d, values=s)"
expected_text = "tf.compat.v1.name_scope(name=n, default_name=d, values=s)"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
self.assertIn("`name` passed to `name_scope`", report)
text = "tf.name_scope(name=None, values=stuff)"
_, _, errors, _ = self._upgrade(text)
self.assertIn("name_scope call with neither name nor default_name",
errors[0])
@parameterized.parameters(
# Rename parameter: delimiter -> sep and add .to_sparse()
["tf.string_split('test', delimiter=' ')",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Rename parameter: source -> input
["tf.strings.split(source='test1')",
"tf.strings.split(input='test1').to_sparse()"],
# Use compat.v1 for skip_empty parameter.
["tf.string_split('test', ' ', True)",
"tf.compat.v1.string_split(source='test', sep=' ', skip_empty=True)"],
["tf.string_split('test', ' ', skip_empty=False)",
"tf.strings.split(input='test', sep=' ').to_sparse()"],
# Split behavior for sep=None changed. (In particular, it now splits on
# all whitespace, not just the space character)
["tf.string_split(x)",
"tf.compat.v1.string_split(source=x)"],
# Split behavior for sep='' changed:
["tf.string_split(x, '')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, sep='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, delimiter='')",
"tf.strings.bytes_split(input=x).to_sparse()"],
["tf.string_split(x, '', result_type='RaggedTensor')",
"tf.strings.bytes_split(input=x)"],
# If sep is a variable, we can't tell if it's empty:
["tf.string_split(x, sep)",
"tf.compat.v1.string_split(source=x, sep=sep)"],
# If sep is a non-empty string literal, then we don't need compat.v1.
["tf.string_split(x, 'non-empty-sep')",
"tf.strings.split(input=x, sep='non-empty-sep').to_sparse()"],
# Add to_sparse unless result_type is RaggedTensor:
["tf.string_split(x, ' ')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='SparseTensor')",
"tf.strings.split(input=x, sep=' ').to_sparse()"],
["tf.string_split(x, ' ', result_type='RaggedTensor')",
"tf.strings.split(input=x, sep=' ')"],
["tf.string_split(x, ' ', result_type=x)",
"tf.compat.v1.string_split(source=x, sep=' ', result_type=x)"],
) # pyformat: disable
# TODO(b/129398290)
def DISABLED_test_string_split(self, text, expected_text):
"""Tests for transforming from tf.string_split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
@parameterized.parameters(
# Add to_sparse unless result_type is RaggedTensor:
["tf.strings.split(x, sep)",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='SparseTensor')",
"tf.strings.split(x, sep).to_sparse()"],
["tf.strings.split(x, sep, result_type='RaggedTensor')",
"tf.strings.split(x, sep)"],
["tf.strings.split(x, sep, result_type=x)",
"tf.compat.v1.strings.split(x, sep, result_type=x)"],
) # pyformat: disable
def test_strings_split(self, text, expected_text):
"""Tests for transforming from tf.strings.split."""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_sdca_to_raw_ops(self):
text = "tf.train.sdca_fprint(input_tensor)"
expected_text = "tf.raw_ops.SdcaFprint(input=input_tensor)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_fprint(input, name=n)"
expected_text = "tf.raw_ops.SdcaFprint(input=input, name=n)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.train.sdca_shrink_l1(w, l, ll)"
expected_text = "tf.raw_ops.SdcaShrinkL1(weights=w, l1=l, l2=ll)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = (
"tf.train.sdca_optimizer(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o)")
expected_text = (
"tf.raw_ops.SdcaOptimizer(sparse_example_indices=a, "
"sparse_feature_indices=b, sparse_feature_values=c, dense_features=d, "
"example_weights=e, example_labels=f, sparse_indices=g, "
"sparse_weights=h, dense_weights=i, example_state_data=j, loss_type=k, "
"l1=l, l2=m, num_loss_partitions=n, num_inner_iterations=o)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
def testXlaExperimental(self):
text = "tf.xla.experimental.jit_scope(0)"
expected_text = "tf.xla.experimental.jit_scope(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.xla.experimental.compile(0)"
expected_text = "tf.xla.experimental.compile(0)"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnErosion2d(self):
text = "tf.nn.erosion2d(v, k, s, r, p)"
expected_text = "tf.nn.erosion2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnDilation2d(self):
text = "tf.nn.dilation2d(v, k, s, r, p)"
expected_text = "tf.nn.dilation2d(v, k, s, r, p, data_format='NHWC')"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testPywrapTensorflowWarning(self):
text = "tf.pywrap_tensorflow.foo()"
expected = "tf.pywrap_tensorflow.foo()"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("`tf.pywrap_tensorflow` will not be distributed", errors[0])
def testKerasSaveModelFormat(self):
text = "tf.keras.models.save_model(model, path)"
expected_text = "tf.keras.models.save_model(model, path, save_format='h5')"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertNotIn(
"saves to the Tensorflow SavedModel format by default", report)
_, report, _, _ = self._upgrade("model.save(path)")
self.assertIn(
"saves to the Tensorflow SavedModel format by default", report)
def test_distribute_strategy(self):
text = "tf.contrib.distribute.CrossDeviceOps()"
expected = "tf.distribute.CrossDeviceOps()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
text = "tf.contrib.distribute.MirroredStrategy"
expected = "tf.contrib.distribute.MirroredStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.MirroredStrategy", errors[0])
text = "tf.distribute.MirroredStrategy"
expected = "tf.distribute.MirroredStrategy"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.distribute.MirroredStrategy API has changed", report)
self.assertIn("make_dataset_iterator->experimental_distribute_dataset",
report)
text = "tf.contrib.distribute.TPUStrategy"
expected = "tf.contrib.distribute.TPUStrategy"
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("migrated to tf.distribute.TPUStrategy",
errors[0])
text = "tf.contrib.distribute.foo"
expected = "tf.contrib.distribute.foo"
_, report, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
self.assertIn("tf.contrib.distribute.* have been migrated", report)
def test_decode_raw(self):
text = "tf.io.decode_raw(bytes=[1,2,3], output_dtype=tf.int32)"
expected_text = (
"tf.io.decode_raw(input_bytes=[1,2,3], output_dtype=tf.int32)")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testRecomputeGrad(self):
text = "tf.contrib.layers.recompute_grad()"
expected = "tf.recompute_grad()"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected, new_text)
def test_load_variable(self):
text = "tf.contrib.framework.load_variable('a')"
expected_text = (
"tf.train.load_variable('a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "tf.contrib.framework.load_variable(checkpoint_dir='a')"
expected_text = (
"tf.train.load_variable(ckpt_dir_or_file='a')")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def test_import_rename_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = "import tensorflow.compat.v2 as tf\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "import tensorflow as tf, other_import as y\n"
text = import_header + old_symbol
new_import_header = "import tensorflow.compat.v2 as tf, other_import as y\n"
expected_text = new_import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow.compat.v2 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow.compat.v1 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow.compat.v2 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(
text, import_rename=True, upgrade_compat_v1_import=True)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow.compat.v1 as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(
text, import_rename=False, upgrade_compat_v1_import=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow import foo\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2 import foo\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow import *\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2 import *\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = "from tensorflow.foo import bar\n"
text = import_header + old_symbol
expected_text = "from tensorflow.compat.v2.foo import bar\n" + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(
text, import_rename=True)
self.assertEqual(new_text, expected_text)
import_header = ("from tensorflow import foo as tf\n"
"from tensorflow.compat import v1 as tf_v1\n"
"from tensorflow.compat import v2 as tf_v2\n")
text = import_header + old_symbol
expected_header = ("from tensorflow.compat.v2 import foo as tf\n"
"from tensorflow.compat import v1 as tf_v1\n"
"from tensorflow.compat import v2 as tf_v2\n")
expected_text = expected_header + new_symbol
_, _, _, new_text = self._upgrade(text, import_rename=True)
self.assertEqual(new_text, expected_text)
def test_import_analysis(self):
old_symbol = "tf.conj(a)"
new_symbol = "tf.math.conj(a)"
# We upgrade the base un-versioned tensorflow aliased as tf
import_header = "import tensorflow as tf\n"
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
import_header = ("import tensorflow as tf\n"
"import tensorflow.compat.v1 as tf_v1\n"
"import tensorflow.compat.v2 as tf_v2\n")
text = import_header + old_symbol
expected_text = import_header + new_symbol
_, _, _, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
# We don't handle unaliased tensorflow imports currently,
# so the upgrade script should log errors
import_header = "import tensorflow\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, _, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("unaliased `import tensorflow`", "\n".join(errors))
# Upgrading explicitly-versioned tf code is unsafe, but we don't
# need to throw errors when we detect explicitly-versioned tf.
import_header = "import tensorflow.compat.v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf, v2 as tf2\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v1` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "import tensorflow.compat.v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
import_header = "from tensorflow.compat import v1 as tf1, v2 as tf\n"
text = import_header + old_symbol
expected_text = import_header + old_symbol
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
self.assertIn("`tensorflow.compat.v2` was directly imported as `tf`",
report)
self.assertEmpty(errors)
def test_api_spec_reset_between_files(self):
for old_symbol, new_symbol in [
("tf.conj(a)", "tf.math.conj(a)"),
("tf.to_int32(x)", "tf.cast(x, dtype=tf.int32)")]:
## Test that the api spec is reset in between files:
import_header = "import tensorflow.compat.v2 as tf\n"
text_a = import_header + old_symbol
expected_text_a = import_header + old_symbol
text_b = old_symbol
expected_text_b = new_symbol
results = self._upgrade_multiple([text_a, text_b])
result_a, result_b = results[0], results[1]
self.assertEqual(result_a[3], expected_text_a)
self.assertEqual(result_b[3], expected_text_b)
def test_model_to_estimator_checkpoint_warning(self):
text = "tf.keras.estimator.model_to_estimator(model)"
_, report, _, _ = self._upgrade(text)
expected_info = "will save object-based checkpoints"
self.assertIn(expected_info, report)
def test_keras_experimental_export_warning(self):
text = "tf.keras.experimental.export_saved_model"
_, report, _, _ = self._upgrade(text)
expected_info = "Please use model.save"
self.assertIn(expected_info, report)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
def testInplaceNoOutputChangeOnErrorHandling(self):
"""In place file should not be modified when parsing error is handled."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "print 'a' \n"
upgraded = "print 'a' \n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(
temp_file.name, temp_file.name, no_change_to_outfile_on_error=True)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
def testInplaceEmptyOutputOnError(self):
"""In place file becomes empty when parsing error is not handled."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "print 'a' \n"
upgraded = ""
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
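# Illustrative sketch, not part of the original test file: the `_upgrade`
# helper used throughout the tests above is assumed to drive the same
# upgrader in memory, roughly as below.  The exact `process_opened_file`
# signature and return values are assumptions of this sketch, not a quote of
# the ast_edits API.
def _sketch_upgrade_in_memory(src):
  from io import StringIO  # the original helper likely uses six.StringIO
  in_file, out_file = StringIO(src), StringIO()
  upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
  # assumed to return (change count, report text, error list)
  count, report, errors = upgrader.process_opened_file(
      "in.py", in_file, "out.py", out_file)
  return count, report, errors, out_file.getvalue()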
if __name__ == "__main__":
test_lib.main()
| apache-2.0 |
giorgiop/scikit-learn | sklearn/mixture/tests/test_gaussian_mixture.py | 9 | 39845 | # Author: Wei Xue <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture.gaussian_mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import (
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_tied,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_spherical)
from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
from sklearn.mixture.gaussian_mixture import _compute_log_det_cholesky
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData(object):
def __init__(self, rng, n_samples=500, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([k * np.ones(int(np.round(w * n_samples)))
for k, w in enumerate(self.weights)])
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% n_components_bad, gmm.fit, X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type_bad,
gmm.fit, X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
assert_raise_message(ValueError,
"Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% tol_bad, gmm.fit, X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
assert_raise_message(ValueError,
"Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative" % reg_covar_bad, gmm.fit, X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
assert_raise_message(ValueError,
"Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% max_iter_bad, gmm.fit, X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% n_init_bad, gmm.fit, X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
assert_raise_message(ValueError,
"Unimplemented initialization method '%s'"
% init_params_bad,
gmm.fit, X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert_equal(gmm.n_components, n_components)
assert_equal(gmm.covariance_type, covariance_type)
assert_equal(gmm.tol, tol)
assert_equal(gmm.reg_covar, reg_covar)
assert_equal(gmm.max_iter, max_iter)
assert_equal(gmm.n_init, n_init)
assert_equal(gmm.init_params, init_params)
def test_check_X():
from sklearn.mixture.base import _check_X
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 2, 2
X_bad_dim = rng.rand(n_components - 1, n_features)
assert_raise_message(ValueError,
'Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X_bad_dim.shape[0]),
_check_X, X_bad_dim, n_components)
X_bad_dim = rng.rand(n_components, n_features + 1)
assert_raise_message(ValueError,
'Expected the input data X have %d features, '
'but got %d features'
% (n_features, X_bad_dim.shape[1]),
_check_X, X_bad_dim, n_components, n_features)
X = rng.rand(n_samples, n_features)
assert_array_equal(X, _check_X(X, n_components, n_features))
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
assert_raise_message(ValueError,
"The parameter 'weights' should have the shape of "
"(%d,), but got %s" %
(n_components, str(weights_bad_shape.shape)),
g.fit, X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
assert_raise_message(ValueError,
"The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights_bad_range),
np.max(weights_bad_range)),
g.fit, X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
assert_raise_message(ValueError,
"The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f"
% np.sum(weights_bad_norm),
g.fit, X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
g.fit, X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
'full': np.ones((n_components + 1, n_features, n_features)),
'tied': np.ones((n_features + 1, n_features + 1)),
'diag': np.ones((n_components + 1, n_features)),
'spherical': np.ones((n_components + 1))}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.
precisions_not_positive = {
'full': precisions_not_pos,
'tied': precisions_not_pos[0],
'diag': -1. * np.ones((n_components, n_features)),
'spherical': -1. * np.ones(n_components)}
not_positive_errors = {
'full': 'symmetric, positive-definite',
'tied': 'symmetric, positive-definite',
'diag': 'positive',
'spherical': 'positive'}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(n_components=n_components,
covariance_type=covar_type,
random_state=rng)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
assert_raise_message(ValueError,
"The parameter '%s precision' should have "
"the shape of" % covar_type,
g.fit, X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
assert_raise_message(ValueError,
"'%s precision' should be %s"
% (covar_type, not_positive_errors[covar_type]),
g.fit, X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
# compare the precision matrix computed from the
# EmpiricalCovariance.covariance_ fitted on X*sqrt(resp)
# with _estimate_gaussian_covariances_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, 'full')
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
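# Illustrative sketch, not part of the original test module: the identity the
# two precision checks above rely on.  For a symmetric positive-definite
# covariance S, the model stores a Cholesky-style factor L of the precision,
# so that inv(S) == L @ L.T.  The construction below mirrors what
# _compute_precision_cholesky is assumed to do for the 'full' case.
def _sketch_precision_from_cholesky_factor():
    import numpy as np
    from scipy import linalg
    rng = np.random.RandomState(0)
    A = rng.rand(3, 3)
    cov = np.dot(A, A.T) + 3 * np.eye(3)  # an SPD covariance matrix
    cov_chol = linalg.cholesky(cov, lower=True)
    # invert the lower Cholesky factor of the covariance and transpose it
    prec_chol = linalg.solve_triangular(cov_chol, np.eye(3), lower=True).T
    # the factor reproduces the precision matrix exactly
    np.testing.assert_allclose(np.dot(prec_chol, prec_chol.T),
                               linalg.inv(cov), atol=1e-10)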
def test_suffstat_sk_tied():
# use equation Nk * Sk / N = S_tied
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
0) / n_samples
covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
precs_est = linalg.inv(covars_pred_tied)
assert_array_almost_equal(precs_est, precs_pred)
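# Illustrative sketch, not part of the original test module: the
# "Nk * Sk / N = S_tied" identity used above, written out with plain numpy.
# The closed form for S_tied below (global scatter minus the between-component
# term) is what the tied estimator is assumed to compute, up to reg_covar.
def _sketch_tied_equals_weighted_full_average():
    import numpy as np
    rng = np.random.RandomState(1)
    n_samples, n_features, n_components = 200, 2, 3
    X = rng.rand(n_samples, n_features)
    resp = rng.rand(n_samples, n_components)
    resp /= resp.sum(axis=1, keepdims=True)
    nk = resp.sum(axis=0)
    xk = np.dot(resp.T, X) / nk[:, np.newaxis]
    # per-component 'full' covariances, as in the EM M-step
    S_full = np.empty((n_components, n_features, n_features))
    for k in range(n_components):
        diff = X - xk[k]
        S_full[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
    # nk-weighted average of the per-component covariances ...
    S_weighted = np.sum(nk[:, None, None] * S_full, axis=0) / n_samples
    # ... equals the closed-form tied covariance
    S_tied = (np.dot(X.T, X) - np.dot(nk * xk.T, xk)) / n_samples
    np.testing.assert_allclose(S_weighted, S_tied, atol=1e-10)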
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
cov_diag = np.diag(cov_diag)
assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, 'diag')
assert_almost_equal(covars_pred_diag, 1. / precs_chol_pred ** 2)
def test_gaussian_suffstat_sk_spherical():
# computing the spherical covariance equals the variance of the
# one-dimensional data obtained by flattening X, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features)
X = X - X.mean()
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean()
covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X,
nk, xk, 0)
covars_pred_spherical2 = (np.dot(X.flatten().T, X.flatten()) /
(n_features * n_samples))
assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical,
'spherical')
assert_almost_equal(covars_pred_spherical, 1. / precs_chol_pred ** 2)
def test_compute_log_det_cholesky():
n_features = 2
rand_data = RandomData(np.random.RandomState(0))
for covar_type in COVARIANCE_TYPE:
covariance = rand_data.covariances[covar_type]
if covar_type == 'full':
predected_det = np.array([linalg.det(cov) for cov in covariance])
elif covar_type == 'tied':
predected_det = linalg.det(covariance)
elif covar_type == 'diag':
predected_det = np.array([np.prod(cov) for cov in covariance])
elif covar_type == 'spherical':
predected_det = covariance ** n_features
# We compute the cholesky decomposition of the covariance matrix
expected_det = _compute_log_det_cholesky(_compute_precision_cholesky(
covariance, covar_type), covar_type, n_features=n_features)
assert_array_almost_equal(expected_det, - .5 * np.log(predected_det))
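# Illustrative sketch, not part of the original test module: the relation
# verified above for the 'full' case.  If L is the Cholesky-style factor of
# the precision (see the sketch after test_suffstat_sk_full), then
#     sum(log(diag(L))) == -0.5 * log(det(Sigma)).
def _sketch_log_det_cholesky_identity():
    import numpy as np
    from scipy import linalg
    rng = np.random.RandomState(2)
    A = rng.rand(4, 4)
    cov = np.dot(A, A.T) + 4 * np.eye(4)  # an SPD covariance matrix
    cov_chol = linalg.cholesky(cov, lower=True)
    prec_chol = linalg.solve_triangular(cov_chol, np.eye(4), lower=True).T
    log_det_chol = np.sum(np.log(np.diag(prec_chol)))
    np.testing.assert_allclose(log_det_chol, -.5 * np.log(linalg.det(cov)))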
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture.gaussian_mixture import _estimate_log_gaussian_prob
# test against _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob(X, means, precs_full, 'full')
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1. / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, 'diag')
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1. / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, 'tied')
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[[k] * n_features for k in
covars_spherical])
log_prob = _estimate_log_gaussian_prob(X, means,
precs_spherical, 'spherical')
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(n_components=n_components, random_state=rng,
weights_init=weights, means_init=means,
precisions_init=precisions,
covariance_type=covar_type)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type)
# Check that an error message is raised if we don't call fit
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", g.predict, X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert_greater(adjusted_rand_score(Y, Y_pred), .95)
def test_gaussian_mixture_fit():
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=20,
reg_covar=0, random_state=rng,
covariance_type=covar_type)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
rtol=0.1, atol=1e-2)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
rtol=0.1, atol=1e-2)
if covar_type == 'full':
prec_pred = g.precisions_
prec_test = rand_data.precisions['full']
elif covar_type == 'tied':
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions['tied']] * n_components)
elif covar_type == 'spherical':
prec_pred = np.array([np.eye(n_features) * c
for c in g.precisions_])
prec_test = np.array([np.eye(n_features) * c for c in
rand_data.precisions['spherical']])
elif covar_type == 'diag':
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in
rand_data.precisions['diag']])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
# the accuracy depends on the amount of data and the randomness of rng
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.1)
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(n_components=n_components,
n_init=n_init, reg_covar=0, random_state=rng,
covariance_type=covar_type)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=max_iter, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_warns_message(ConvergenceWarning,
'Initialization %d did not converged. '
'Try different init parameters, '
'or increase max_iter, tol '
'or check for degenerate data.'
% max_iter, g.fit, X)
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=rng).fit(X).score(X)
train2 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=rng, n_init=5).fit(X).score(X)
assert_greater_equal(train2, train1)
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng).fit(X)
assert_equal(g._n_parameters(), n_params[cv_type])
def test_bic_1d_1component():
# Test that all covariance_types return the same BIC score for
# 1-dimensional, 1-component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = GaussianMixture(n_components=n_components,
covariance_type='full',
random_state=rng).fit(X).bic(X)
for covariance_type in ['tied', 'diag', 'spherical']:
bic = GaussianMixture(n_components=n_components,
covariance_type=covariance_type,
random_state=rng).fit(X).bic(X)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi)))
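    # sgh equals the per-sample negative log-likelihood of the single Gaussian
    # fitted by maximum likelihood (0.5 * (log|Sigma| + d * (1 + log(2*pi)))),
    # so 2 * n_samples * sgh approximates -2 * log-likelihood in the reference
    # AIC/BIC values computed below.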
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng, max_iter=200)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = (2 * n_samples * sgh +
np.log(n_samples) * g._n_parameters())
bound = n_features / np.sqrt(n_samples)
assert_true((g.aic(X) - aic) / n_samples < bound)
assert_true((g.bic(X) - bic) / n_samples < bound)
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=1)
h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
def test_warm_start():
random_state = 0
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
    # Assert that warm_start gives the same result for the same total number
    # of iterations
g = GaussianMixture(n_components=n_components, n_init=1, max_iter=2,
reg_covar=0, random_state=random_state,
warm_start=False)
h = GaussianMixture(n_components=n_components, n_init=1, max_iter=1,
reg_covar=0, random_state=random_state,
warm_start=True)
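    # g runs two EM iterations in a single fit; h runs one iteration per fit
    # but resumes from its previous state, so two successive fits of h should
    # reproduce g.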
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert_greater(score2, score1)
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=False, tol=1e-6)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=True, tol=1e-6)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
g.fit(X)
h.fit(X).fit(X)
assert_true(not g.converged_)
assert_true(h.converged_)
def test_score():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", gmm1.score, X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
    # Check that the score increases after fitting with more iterations
gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng,
covariance_type=covar_type).fit(X)
assert_greater(gmm2.score(X), gmm1.score(X))
def test_score_samples():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", gmm.score_samples, X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert_equal(gmm_score_samples.shape[0], rand_data.n_samples)
def test_monotonic_likelihood():
    # Check that each EM step without regularization monotonically improves
    # the training set likelihood
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, reg_covar=0,
warm_start=True, max_iter=1, random_state=rng,
tol=1e-7)
current_log_likelihood = -np.infty
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_log_likelihood = current_log_likelihood
try:
current_log_likelihood = gmm.fit(X).score(X)
except ConvergenceWarning:
pass
assert_greater_equal(current_log_likelihood,
prev_log_likelihood)
if gmm.converged_:
break
assert_true(gmm.converged_)
def test_regularisation():
    # Train the GaussianMixture on degenerate data defined by two clusters
    # with zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack((np.ones((n_samples // 2, n_features)),
np.zeros((n_samples // 2, n_features))))
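    # With n_components == n_samples every component can collapse onto a
    # single point, giving a zero empirical covariance; the fit must fail
    # unless a positive reg_covar is used (as done at the end of the loop).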
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
covariance_type=covar_type, random_state=rng)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_raise_message(ValueError,
"Fitting the mixture model failed because "
"some components have ill-defined empirical "
"covariance (for instance caused by "
"singleton or collapsed samples). Try to "
"decrease the number of components, or "
"increase reg_covar.", gmm.fit, X)
gmm.set_params(reg_covar=1e-6).fit(X)
def test_property():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng,
n_init=5)
gmm.fit(X)
if covar_type == 'full':
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == 'tied':
assert_array_almost_equal(linalg.inv(gmm.precisions_),
gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_)
def test_sample():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_features, n_components = rand_data.n_features, rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng)
        # Sampling requires a fitted GaussianMixture
assert_raise_message(NotFittedError, "This GaussianMixture instance "
"is not fitted", gmm.sample, 0)
gmm.fit(X)
assert_raise_message(ValueError, "Invalid value for 'n_samples",
gmm.sample, 0)
# Just to make sure the class samples correctly
X_s, y_s = gmm.sample(20000)
for k in range(n_features):
if covar_type == 'full':
assert_array_almost_equal(gmm.covariances_[k],
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'tied':
assert_array_almost_equal(gmm.covariances_,
np.cov(X_s[y_s == k].T), decimal=1)
elif covar_type == 'diag':
assert_array_almost_equal(gmm.covariances_[k],
np.diag(np.cov(X_s[y_s == k].T)),
decimal=1)
else:
assert_array_almost_equal(
gmm.covariances_[k], np.var(X_s[y_s == k] - gmm.means_[k]),
decimal=1)
means_s = np.array([np.mean(X_s[y_s == k], 0)
for k in range(n_features)])
assert_array_almost_equal(gmm.means_, means_s, decimal=1)
@ignore_warnings(category=ConvergenceWarning)
def test_init():
    # Check that increasing n_init leads to a better solution
random_state = 0
rand_data = RandomData(np.random.RandomState(random_state), scale=1)
n_components = rand_data.n_components
X = rand_data.X['full']
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, random_state=random_state).fit(X)
gmm2 = GaussianMixture(n_components=n_components, n_init=100,
max_iter=1, random_state=random_state).fit(X)
assert_greater(gmm2.lower_bound_, gmm1.lower_bound_)
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 38 | 11165 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest, assert_equal, assert_true
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
    raise SkipTest("Skipping because the SciPy version is earlier than 0.12.0 "
                   "and thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
    # Negative elements are the diagonal: the elements of the original
    # image. Positive elements are the values of the gradient; they
    # should all be equal for grad_x and grad_y.
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
    # Check that the function works regardless of the mask dtype
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
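    # Two rounds of 2x2 block summation shrink the image by a factor of 4 in
    # each dimension; dividing by 16 afterwards turns the summed blocks into
    # block means.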
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
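    # For every axis the number of extracted views is
    # (image_size - patch_size) // step + 1, which is what the expected_views
    # tuples above encode.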
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
rs2/pandas | pandas/io/formats/latex.py | 1 | 22875 | """
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
from typing import IO, Iterator, List, Optional, Type
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter, TableFormatter
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
def __init__(
self,
formatter: DataFrameFormatter,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
self.strrows: List[List[str]] = list(
zip(*self.strcols) # type: ignore[arg-type]
)
def get_strrow(self, row_num: int) -> str:
"""Get string representation of the row."""
row = self.strrows[row_num]
is_multicol = (
row_num < self.column_levels and self.fmt.header and self.multicolumn
)
is_multirow = (
row_num >= self.header_levels
and self.fmt.index
and self.multirow
and self.index_levels > 1
)
is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1
crow = self._preprocess_row(row)
if is_multicol:
crow = self._format_multicolumn(crow)
if is_multirow:
crow = self._format_multirow(crow, row_num)
lst = []
lst.append(" & ".join(crow))
lst.append(" \\\\")
if is_cline_maybe_required:
cline = self._compose_cline(row_num, len(self.strcols))
lst.append(cline)
return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
        # re-establish the MultiIndex that has been joined by _to_str_columns
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
out = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in out]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
def _preprocess_row(self, row: List[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: List[str]) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: List[str], i: int) -> List[str]:
r"""
        Check the following rows to determine whether this row should start
        a multirow block.
        e.g.:       becomes:
        a & 0 &     \multirow{2}{*}{a} & 0 &
          & 1 &     & 1 &
        b & 0 &     \cline{1-2}
                    b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
"""Iterator over rows of the header or the body of the table."""
@abstractmethod
def __iter__(self) -> Iterator[str]:
"""Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
"""Iterator for the table header rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num < self._header_row_num:
yield self.get_strrow(row_num)
class RowBodyIterator(RowStringIterator):
"""Iterator for the table body rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num >= self._header_row_num:
yield self.get_strrow(row_num)
class TableBuilderAbstract(ABC):
"""
Abstract table builder producing string representation of LaTeX table.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
column_format: str, optional
Column format, for example, 'rcl' for three columns.
multicolumn: bool, optional
Use multicolumn to enhance MultiIndex columns.
multicolumn_format: str, optional
The alignment for multicolumns, similar to column_format.
multirow: bool, optional
Use multirow to enhance MultiIndex rows.
caption: str, optional
Table caption.
label: str, optional
LaTeX label.
position: str, optional
Float placement specifier, for example, 'htb'.
"""
def __init__(
self,
formatter: DataFrameFormatter,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.label = label
self.position = position
def get_result(self) -> str:
"""String representation of LaTeX table."""
elements = [
self.env_begin,
self.top_separator,
self.header,
self.middle_separator,
self.env_body,
self.bottom_separator,
self.env_end,
]
result = "\n".join([item for item in elements if item])
trailing_newline = "\n"
result += trailing_newline
return result
@property
@abstractmethod
def env_begin(self) -> str:
"""Beginning of the environment."""
@property
@abstractmethod
def top_separator(self) -> str:
"""Top level separator."""
@property
@abstractmethod
def header(self) -> str:
"""Header lines."""
@property
@abstractmethod
def middle_separator(self) -> str:
"""Middle level separator."""
@property
@abstractmethod
def env_body(self) -> str:
"""Environment body."""
@property
@abstractmethod
def bottom_separator(self) -> str:
"""Bottom level separator."""
@property
@abstractmethod
def env_end(self) -> str:
"""End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
"""Table builder producing string representation of LaTeX table."""
@property
def header(self) -> str:
iterator = self._create_row_iterator(over="header")
return "\n".join(list(iterator))
@property
def top_separator(self) -> str:
return "\\toprule"
@property
def middle_separator(self) -> str:
return "\\midrule" if self._is_separator_required() else ""
@property
def env_body(self) -> str:
iterator = self._create_row_iterator(over="body")
return "\n".join(list(iterator))
def _is_separator_required(self) -> bool:
return bool(self.header and self.env_body)
@property
def _position_macro(self) -> str:
r"""Position macro, extracted from self.position, like [h]."""
return f"[{self.position}]" if self.position else ""
@property
def _caption_macro(self) -> str:
r"""Caption macro, extracted from self.caption, like \caption{cap}."""
return f"\\caption{{{self.caption}}}" if self.caption else ""
@property
def _label_macro(self) -> str:
r"""Label macro, extracted from self.label, like \label{ref}."""
return f"\\label{{{self.label}}}" if self.label else ""
def _create_row_iterator(self, over: str) -> RowStringIterator:
"""Create iterator over header or body of the table.
Parameters
----------
over : {'body', 'header'}
Over what to iterate.
Returns
-------
RowStringIterator
Iterator over body or header.
"""
iterator_kind = self._select_iterator(over)
return iterator_kind(
formatter=self.fmt,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
)
def _select_iterator(self, over: str) -> Type[RowStringIterator]:
"""Select proper iterator over table rows."""
if over == "header":
return RowHeaderIterator
elif over == "body":
return RowBodyIterator
else:
msg = f"'over' must be either 'header' or 'body', but {over} was provided"
raise ValueError(msg)
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
>>> table = builder.get_result()
>>> print(table)
\\begin{longtable}{lrl}
\\caption{a long table}
\\label{tab:long}\\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endfirsthead
\\caption[]{a long table} \\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{3}{r}{{Continued on next page}} \\\\
\\midrule
\\endfoot
<BLANKLINE>
\\bottomrule
\\endlastfoot
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\end{longtable}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
first_row = (
f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
)
elements = [first_row, f"{self._caption_and_label()}"]
return "\n".join([item for item in elements if item])
def _caption_and_label(self) -> str:
if self.caption or self.label:
double_backslash = "\\\\"
elements = [f"{self._caption_macro}", f"{self._label_macro}"]
caption_and_label = "\n".join([item for item in elements if item])
caption_and_label += double_backslash
return caption_and_label
else:
return ""
@property
def middle_separator(self) -> str:
iterator = self._create_row_iterator(over="header")
# the content between \endfirsthead and \endhead commands
# mitigates repeated List of Tables entries in the final LaTeX
# document when dealing with longtable environments; GH #34360
elements = [
"\\midrule",
"\\endfirsthead",
f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
self.top_separator,
self.header,
"\\midrule",
"\\endhead",
"\\midrule",
f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
"{{Continued on next page}} \\\\",
"\\midrule",
"\\endfoot\n",
"\\bottomrule",
"\\endlastfoot",
]
if self._is_separator_required():
return "\n".join(elements)
return ""
@property
def bottom_separator(self) -> str:
return ""
@property
def env_end(self) -> str:
return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{table}
\\centering
\\caption{caption}
\\label{lab}
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
elements = [
f"\\begin{{table}}{self._position_macro}",
"\\centering",
f"{self._caption_macro}",
f"{self._label_macro}",
f"\\begin{{tabular}}{{{self.column_format}}}",
]
return "\n".join([item for item in elements if item])
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
return f"\\begin{{tabular}}{{{self.column_format}}}"
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\\end{tabular}"
class LatexFormatter(TableFormatter):
"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
        The column format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__, e.g. 'rcl' for 3 columns.
See Also
--------
HTMLFormatter
"""
def __init__(
self,
formatter: DataFrameFormatter,
longtable: bool = False,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.longtable = longtable
self.column_format = column_format # type: ignore[assignment]
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.label = label
self.position = position
def write_result(self, buf: IO[str]) -> None:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
table_string = self.builder.get_result()
buf.write(table_string)
@property
def builder(self) -> TableBuilderAbstract:
"""Concrete table builder.
Returns
-------
TableBuilder
"""
builder = self._select_builder()
return builder(
formatter=self.fmt,
column_format=self.column_format,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
caption=self.caption,
label=self.label,
position=self.position,
)
def _select_builder(self) -> Type[TableBuilderAbstract]:
"""Select proper table builder."""
if self.longtable:
return LongTableBuilder
if any([self.caption, self.label, self.position]):
return RegularTableBuilder
return TabularBuilder
@property
def column_format(self) -> str:
"""Column format."""
return self._column_format
@column_format.setter
def column_format(self, input_column_format: Optional[str]) -> None:
"""Setter for column format."""
if input_column_format is None:
self._column_format = (
self._get_index_format() + self._get_column_format_based_on_dtypes()
)
elif not isinstance(input_column_format, str):
raise ValueError(
f"column_format must be str or unicode, "
f"not {type(input_column_format)}"
)
else:
self._column_format = input_column_format
def _get_column_format_based_on_dtypes(self) -> str:
"""Get column format based on data type.
        Numbers are right-aligned and strings are left-aligned.
"""
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
return "l"
dtypes = self.frame.dtypes._values
return "".join(map(get_col_type, dtypes))
def _get_index_format(self) -> str:
"""Get index column format."""
return "l" * self.frame.index.nlevels if self.fmt.index else ""
def _escape_symbols(row: List[str]) -> List[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
List of string, that may contain special symbols.
Returns
-------
list
list of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
def _convert_to_bold(crow: List[str], ilevels: int) -> List[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
ssorgatem/qiime | qiime/colors.py | 15 | 24391 | #!/usr/bin/env python
# file colors.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
# remember to add yourself
__credits__ = ["Rob Knight", "Jesse Stombaugh", "Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
"""Code for coloring series based on prefs file.
"""
from colorsys import rgb_to_hsv, hsv_to_rgb
from parse import parse_mapping_file, group_by_field, parse_taxa_summary_table
from numpy import array
from math import floor
import os
import re
from qiime.util import MissingFileError
from qiime.sort import natsort
def string_to_rgb(s):
"""Converts hex string to RGB"""
orig_s = s
s = s.strip()
if s.startswith('#'):
s = s[1:]
if not len(s) == 6:
raise ValueError("String %s doesn't look like a hex string" % orig_s)
return int(s[:2], 16), int(s[2:4], 16), int(s[4:], 16)
def rgb_tuple_to_hsv(rgb):
"""Converts rgb tuple to hsv on Mage's scale"""
rgb_0_to_1 = array(rgb) / 255.0
hsv = rgb_to_hsv(*tuple(rgb_0_to_1))
return hsv[0] * 360, hsv[1] * 100, hsv[2] * 100
def mage_hsv_tuple_to_rgb(hsv):
"""Converts hsv tuple on Mage scale to rgb on 0-255 scale"""
hsv_0_to_1 = hsv[0] / 360.0, hsv[1] / 100.0, hsv[2] / 100.0
rgb = hsv_to_rgb(*tuple(hsv_0_to_1))
return int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)
class Color(object):
"""Stores a color object: name, HSV, ability to write as HTML or Mage.
Note: the reason we store as HSV, not RGB, is that you frequently want
to do gradient colors by hue going from e.g. white to blue, white to red,
etc. Unfortunately, in RGB, you can't specify _which_ white you have
in e.g. #FFFFFF, whereas to get the right gradient you need to be able
to specify that you want (0,0,100) or (180,0,100) or whatever. Hence
the colorspace gymnastics.
"""
def __init__(self, name, coords, colorspace='rgb'):
"""Returns new Color object. Init with name and coords as (R,G,B).
Can also initialize with coords as (H,S,V) or #aabbcc format.
"""
self.Name = name
if isinstance(coords, str): # assume is hex format
self.Coords = rgb_tuple_to_hsv(string_to_rgb(coords))
elif colorspace == 'rgb':
self.Coords = rgb_tuple_to_hsv(tuple(coords))
elif colorspace == 'hsv':
self.Coords = tuple(coords)
else:
raise ValueError(
"Unknown colorspace %s: valid values are rgb, hsv" %
colorspace)
def toRGB(self):
"""Returns self as r, g, b tuple."""
return mage_hsv_tuple_to_rgb(self.Coords)
def toMage(self):
"""Returns self as Mage/KiNG-format string"""
h, s, v = self.Coords
return '@hsvcolor {%s} %3.1f %3.1f %3.1f' % (self.Name, h, s, v)
def toHex(self):
"""Returns self as hex string."""
rgb = self.toRGB()
return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],
hex(rgb[2])[2:])).replace(' ', '0')
def toInt(self):
"""Returns self as hex string."""
rgb = self.toHex()[1:]
return int(float.fromhex(rgb))
def __str__(self):
"""Return string representation of self"""
return str(self.Name) + ':' + self.toHex()
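# Example: Color stores HSV on Mage's 0-360/0-100/0-100 scale regardless of
# the input colorspace, e.g. Color('red', (255, 0, 0)).Coords gives
# (0.0, 100.0, 100.0) and Color('red', (255, 0, 0)).toHex() gives '#ff0000'.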
def color_dict_to_objects(d, colorspace='hsv'):
"""Converts color dict to dict of Color objects"""
result = {}
for k, v in d.items():
result[k] = Color(k, v, colorspace)
return result
# Note: these are all in Mage HSV colorspace
'''
These are the old colors
data_color_hsv = {
'aqua': (180, 100, 100),
'blue': (240,100,100),
'fuchsia': (300,100,100),
'gray': (300,0,50.2),
'green': (120,100,50.2),
'lime': (120,100,100),
'maroon': (0,100,50.2),
'olive': (60,100,50.2),
'purple': (300,100,50.2),
'red': (0,100,100),
'silver': (0, 0, 75.3),
'teal': (180,100,50.2),
'yellow': (60,100,100)
}
This is the old order
data_color_order = ['blue','lime','red','aqua','fuchsia','yellow','green', \
'maroon','teal','purple','olive','silver','gray']
'''
data_color_hsv = {
#'black1': (0,0,20),
'red1': (0, 100, 100),
'blue1': (240, 100, 100),
'orange1': (28, 98, 95),
'green1': (120, 100, 50.2),
'purple1': (302, 73, 57),
'yellow1': (60, 100, 100),
'cyan1': (184, 49, 96),
'pink1': (333, 37, 96),
'teal1': (178, 42, 63),
'brown1': (36, 89, 42),
'gray1': (0, 0, 50.2),
'lime': (123, 99, 96),
'red2': (14, 51, 97),
'blue2': (211, 42, 85),
'orange2': (32, 46, 99),
'green2': (142, 36, 79),
'purple2': (269, 29, 75),
'yellow2': (56, 40, 100),
#'black2': (303,100,24),
'gray2': (0, 0, 75.3),
#'teal2': (192,100,24),
'red3': (325, 100, 93),
'blue3': (197, 100, 100),
#'purple3': (271,43,36),
'brown2': (33, 45, 77),
'green3': (60, 100, 50.2),
'purple4': (264, 75, 100),
#'yellow3': (60,66,75),
#'blue4': (213,45,77),
'red4': (348, 31, 74),
'teal3': (180, 100, 50.2),
#'brown3': (60,100,28),
'red5': (0, 100, 50.2),
'green4': (81, 100, 26),
#'purple5': (240,100,41),
'orange3': (26, 100, 65)
#'brown4': (25,100,20),
#'red6': (17,100,63),
#'purple6':(272,100,44)
}
data_color_order = ['red1', 'blue1', 'orange1', 'green1', 'purple1', 'yellow1',
'cyan1', 'pink1', 'teal1', 'brown1', 'gray1', 'lime', 'red2', 'blue2',
'orange2', 'green2', 'purple2', 'yellow2', 'gray2', 'red3',
'blue3', 'brown2', 'green3', 'purple4',
'red4', 'teal3', 'red5', 'green4', 'orange3']
data_colors = color_dict_to_objects(data_color_hsv)
kinemage_colors = [
'hotpink',
'blue',
'lime',
'gold',
'red',
'sea',
'purple',
'green']
def iter_color_groups(mapping, prefs):
"""Iterates over color groups for each category given mapping file/prefs.
See get_group_colors for details of algorithm.
"""
# Iterate through prefs and color by given mapping labels
for key in natsort(prefs.keys()):
col_name = prefs[key]['column']
if 'colors' in prefs[key]:
if isinstance(prefs[key]['colors'], dict):
colors = prefs[key]['colors'].copy() # copy so we can mutate
else:
colors = prefs[key]['colors'][:]
else:
colors = {}
labelname = prefs[key]['column']
# Define groups and associate appropriate colors to each group
groups = group_by_field(mapping, col_name)
colors, data_colors, data_color_order = \
get_group_colors(groups, colors)
yield labelname, groups, colors, data_colors, data_color_order
def get_group_colors(groups, colors, data_colors=data_colors,
data_color_order=data_color_order):
"""Figures out group colors for a specific series based on prefs.
Algorithm is as follows:
- For each name, color pair we know about:
- Check if the name is one of the groups (exact match)
- If it isn't, assume it's a prefix and pull out all the matching groups
- If the color is just a string, set everything to the color with that
name
- Otherwise, assume that either it's a new color we're adding, or that
it's a range for gradient coloring.
- If it's a new color, create it and add it to added_data_colors.
- If it's a gradient, make up all the new colors and add them to
added_data_colors
The current method for gradient coloring of columns (should perhaps
replace with more general method) is to pass in any of the following:
'colors':(('white', (0,0,100)),('red',(0,100,100)))
makes gradient between white and red, applies to all samples
'colors':{'RK':(('white',(0,0,100)),('red',(0,100,100))),
'NF':(('white',(120,0,100)),('green',(120,100,100)))
}
pulls the combination samples starting with RK, colors with
first gradient, then pulls the combination samples starting
with NF, colors with the next gradient.
Return values are:
- colors: dict of {group_value:color_name}
- data_colors: dict of {color_name:color_object}
- data_color_order: order in which the data colors are used/written.
"""
added_data_colors = {}
if isinstance(colors, dict):
# assume we're getting some of the colors out of a dict
if colors.items() != []:
for k, v in sorted(colors.items()):
if k not in groups: # assume is prefix
k_matches = [g for g in groups if g.startswith(k)]
if isinstance(v, str): # just set everything to this color
for m in k_matches:
colors[m] = v
else: # assume is new color or range
first, second = v
if isinstance(first, str): # new named color?
if first not in data_colors:
added_data_colors[first] = Color(first, second)
for m in k_matches:
colors[m] = first
else: # new color range?
                            start_color, end_color = map(get_color,
                                                         [first, second])
                            # HSV endpoints for the gradient
                            start_hsv = start_color.Coords
                            end_hsv = end_color.Coords
                            num_colors = len(k_matches)
curr_data_colors = color_dict_to_objects(
make_color_dict(start_color,
start_hsv, end_color, end_hsv, num_colors))
curr_colors = {}
color_groups(k_matches, curr_colors,
natsort(curr_data_colors))
colors.update(curr_colors)
added_data_colors.update(curr_data_colors)
del colors[k]
elif not isinstance(v, str): # assume val is new color
color = get_color(v)
if color.Name not in data_colors:
added_data_colors[color.Name] = color
colors[k] = color.Name
# handle any leftover groups
color_groups(groups, colors, data_color_order)
# add new colors
data_colors.update(added_data_colors)
if added_data_colors != {}:
data_color_order.append(''.join(natsort(added_data_colors)))
else:
# handle case where no prefs is used
color_groups(groups, colors, data_color_order)
else:
# handle the case where colors is a tuple for gradients
start_color, end_color = map(get_color, colors)
start_hsv = start_color.Coords
end_hsv = end_color.Coords
num_colors = len(groups)
data_colors = color_dict_to_objects(
make_color_dict(start_color, start_hsv, end_color,
end_hsv, num_colors))
data_color_order = list(natsort(data_colors.keys()))
colors = {}
color_groups(groups, colors, data_color_order)
return colors, data_colors, data_color_order
def get_color(color, data_colors=data_colors):
"""Gets a color by looking up its name or initializing with name+data"""
if isinstance(color, str):
if color in data_colors:
return data_colors[color]
else:
raise ValueError("Color name %s in prefs not recognized" % color)
else:
name, coords = color
if isinstance(coords, str):
colorspace = 'rgb'
else:
colorspace = 'hsv'
return Color(name, coords, colorspace)
def color_groups(groups, colors, data_color_order):
"""Colors a set of groups in data_color_order, handling special colors.
Modifies colors in-place.
Cycles through data colors (i.e. wraps around when last color is reached).
"""
group_num = -1
for g in natsort(groups):
if g not in colors:
group_num += 1
if group_num == len(data_color_order):
group_num = 0
colors[g] = data_color_order[group_num]
def make_color_dict(start_name, start_hsv, end_name, end_hsv, n):
"""Makes dict of color gradient"""
colors = linear_gradient(start_hsv, end_hsv, n)
names = ['%sto%s%s_%s' % (start_name, end_name, n, i) for i in range(n)]
return dict(zip(names, colors))
def combine_map_label_cols(combinecolorby, mapping):
"""Merge two or more mapping columns into one column"""
combinedmapdata = array([''] * len(mapping), dtype='a100')
title = []
match = False
for p in range(len(combinecolorby)):
for i in range(len(mapping[0])):
if str(combinecolorby[p]) == str(mapping[0][i]):
match = True
for q in range(len(mapping)):
combinedmapdata[q] = combinedmapdata[q] + mapping[q][i]
break
else:
match = False
if not match:
raise ValueError(
'One of the columns you tried to combine does not exist!')
title.append(combinecolorby[p])
combinedmapdata[0] = '&&'.join(title)
for i in range(len(combinedmapdata)):
mapping[i].append(combinedmapdata[i])
return mapping
def process_colorby(colorby, data, color_prefs=None):
"""Parses the colorby option from the command line.
color_prefs is required if colorby is not passed.
"""
match = False
prefs = {}
mapping = data['map']
colorbydata = []
if colorby is None and color_prefs is None:
        # if neither the colorby option nor the prefs file is given, color by
        # all categories in the mapping file
colorbydata = mapping[0]
elif colorby and color_prefs:
# if both the colorby option and prefs file are given, use the categories
# from the colorby option with their appropriate colors in the prefs
# file
prefs_colorby = [color_prefs[i]['column'] for i in color_prefs]
cmd_colorby = colorby.strip().strip("'").split(',')
for i in range(len(cmd_colorby)):
for j in range(len(prefs_colorby)):
if cmd_colorby[i] == prefs_colorby[j]:
colorbydata.append(prefs_colorby[j])
match = True
break
else:
match = False
if not match:
colorbydata.append(cmd_colorby[i])
names = list(colorbydata)
elif colorby:
# if only the colorby option is passed
colorbydata = colorby.strip().strip("'").split(',')
else:
# if only the prefs file is passed
colorbydata = [color_prefs[i]['column'] for i in color_prefs]
names = list(color_prefs)
match = False
for j, col in enumerate(colorbydata):
key = str(col)
# transfer over old color data if it was present
if '&&' in col:
# Create an array using multiple columns from mapping file
combinecolorby = col.split('&&')
data['map'] = combine_map_label_cols(combinecolorby, mapping)
prefs[key] = {}
prefs[key]['column'] = '&&'.join(combinecolorby)
else:
# Color by only one column in mapping file
prefs[key] = {}
prefs[key]['column'] = col
if color_prefs:
for p in color_prefs:
if 'column' in color_prefs[p] and color_prefs[p]['column'] == col:
if 'colors' in color_prefs[p]:
prefs[key]['colors'] = color_prefs[p]['colors']
else:
prefs[key]['colors'] = (
('white', (0, 0, 100)), ('red', (0, 100, 100)))
match = True
break
else:
match = False
if not match:
prefs[key] = {}
prefs[key]['column'] = col
prefs[key]['colors'] = (
('white', (0, 0, 100)), ('red', (0, 100, 100)))
return prefs, data
def linear_gradient(start, end, nbins, eps=1e-10):
"""Makes linear color gradient from start to end, using nbins.
Returns list of (x, y, z) tuples in current colorspace.
eps is used to prevent the case where start and end are the same.
"""
start = array(start)
end = array(end)
result = []
n_minus_1 = max(float(nbins - 1), eps)
for i in range(nbins):
result.append(
list((start * (n_minus_1 - i) / n_minus_1) + (end * (i / n_minus_1))))
return result
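# Example: linear_gradient((0, 0, 100), (0, 100, 100), 3) returns
# [[0.0, 0.0, 100.0], [0.0, 50.0, 100.0], [0.0, 100.0, 100.0]], i.e. evenly
# spaced points from start to end in the current colorspace.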
# The following functions were not unit_tested, however the parts within
# the functions are unit_tested
def get_map(options, data):
"""Opens and returns mapping data"""
try:
map_f = open(options.map_fname, 'U').readlines()
except (TypeError, IOError):
raise MissingFileError('Mapping file required for this analysis')
data['map'] = parse_mapping_file(map_f)
return data['map']
def map_from_coords(coords):
"""Makes pseudo mapping file from coords.
    Set data['map'] to the result of this if a coords file is supplied but no
    mapping file is.
    TODO: write equivalent functions for other inputs, e.g. for rarefaction --
    the basic principle is that you need a data structure from which a list
    of sample ids can be extracted.
"""
    # coords[0] holds the sample ids
    result = [['SampleID', 'Sample']]
    for sample_id in coords[0]:
        result.append([sample_id, 'Sample'])
    return result
def sample_color_prefs_and_map_data_from_options(options):
"""Returns color prefs and mapping data based on options.
Note: opens files as needed. Only returns the info related to metadata
coloring and category maps. If you need additional info, it is necessary
to get that info explicitly (e.g. coord files, rarefaction files, etc.).
For example, you might modify the data dict afterwards to add coords,
rarefaction info, etc. depending on the application.
"""
data = {}
# Open and get mapping data, if none supplied create a pseudo mapping \
# file
mapping, headers, comments = get_map(options, data)
new_mapping = []
new_mapping.append(headers)
for i in range(len(mapping)):
new_mapping.append(mapping[i])
data['map'] = new_mapping
# need to set some other way from sample ids
# Determine which mapping headers to color by, if none given, color by \
# Sample ID's
try:
colorby = options.colorby
except AttributeError:
colorby = None
if options.prefs_path:
prefs = eval(open(options.prefs_path, 'U').read())
color_prefs, data = process_colorby(colorby, data,
prefs['sample_coloring'])
if 'background_color' in prefs:
background_color = prefs['background_color']
else:
background_color = 'black'
if 'ball_scale' in prefs:
ball_scale = prefs['ball_scale']
else:
ball_scale = 1.0
arrow_colors = {}
if 'arrow_line_color' in prefs:
arrow_colors['line_color'] = prefs['arrow_line_color']
else:
arrow_colors['line_color'] = 'white'
if 'arrow_head_color' in prefs:
arrow_colors['head_color'] = prefs['arrow_head_color']
else:
arrow_colors['head_color'] = 'red'
else:
background_color = 'black'
color_prefs, data = process_colorby(colorby, data, None)
ball_scale = 1.0
arrow_colors = {'line_color': 'white', 'head_color': 'red'}
if options.prefs_path and options.background_color:
background_color = options.background_color
elif options.background_color:
background_color = options.background_color
if background_color == 'black':
label_color = 'white'
else:
label_color = 'black'
return (
color_prefs, data, background_color, label_color, ball_scale, arrow_colors
)
def taxonomy_color_prefs_and_map_data_from_options(options):
"""Returns color prefs and counts data based on options.
counts data is any file in a format that can be parsed by parse_otu_table
"""
data = {}
data['counts'] = {}
taxonomy_levels = []
# need to set some other way from sample ids
# Determine which mapping headers to color by, if none given, color by \
# Sample ID's
taxonomy_count_files = options.counts_fname
for f in taxonomy_count_files:
try:
counts_f = open(f, 'U').readlines()
except (TypeError, IOError):
raise MissingFileError('Counts file required for this analysis')
sample_ids, otu_ids, otu_table = \
parse_taxa_summary_table(counts_f)
data['counts'][f] = (sample_ids, otu_ids, otu_table)
level = max([len(t.split(';')) - 1 for t in otu_ids])
taxonomy_levels.append(str(level))
if options.prefs_path:
prefs = eval(open(options.prefs_path, 'U').read())
color_prefs = taxonomy_process_prefs(taxonomy_levels,
prefs['taxonomy_coloring'])
if 'background_color' in prefs:
background_color = prefs['background_color']
else:
background_color = 'black'
else:
background_color = 'black'
color_prefs = taxonomy_process_prefs(taxonomy_levels, None)
if options.prefs_path and options.background_color:
background_color = options.background_color
elif options.background_color:
background_color = options.background_color
if background_color == 'black':
label_color = 'white'
else:
label_color = 'black'
return color_prefs, data, background_color, label_color
def taxonomy_process_prefs(taxonomy_levels, color_prefs=None):
"""Creates taxonomy prefs dict given specific taxonomy levels.
color_prefs is not required
    taxonomy_levels is a list of the level numbers, e.g. Phylum is 2
    prefs will include a 'colors' dictionary for each given level;
    if there is a corresponding level in color_prefs, that dictionary is
    used for the level, otherwise an empty dict is added
"""
prefs = {}
for j, col in enumerate(taxonomy_levels):
key = str(col)
col = str(col)
# Color by only one level
prefs[key] = {}
prefs[key]['column'] = col
if color_prefs:
for p in color_prefs:
if 'column' in color_prefs[p] and str(color_prefs[p]['column']) == col:
if 'colors' in color_prefs[p]:
prefs[key]['colors'] = color_prefs[p]['colors'].copy()
else:
prefs[key]['colors'] = {}
match = True
break
else:
match = False
if not match:
prefs[key] = {}
prefs[key]['column'] = col
prefs[key]['colors'] = {}
return prefs
def get_qiime_hex_string_color(index):
"""Retrieve an HEX color from the list of QIIME colors
Input:
index: index of the color to retrieve, if the number is greater than the
number of available colors, it will rollover in the list.
Output:
color: string in the format #FF0000
"""
assert index >= 0, "There are no negative indices for the QIIME colors"
n_colors = len(data_color_order)
if index >= n_colors:
index = int(index - floor((index / n_colors) * n_colors))
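        # equivalent to index % n_colors, so indices roll over cyclically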
return data_colors[data_color_order[index]].toHex()
def matplotlib_rgb_color(rgb_color):
"""Returns RGB color in matplotlib format.
ex: (255,0,255) will return (1.0,0.0,1.0)
"""
return tuple([i / 255. for i in rgb_color])
| gpl-2.0 |
DomiDre/SASModels | plot_cylinder.py | 1 | 9213 | import sas_models
#import matplotlib as mpl
#mpl.use('Agg')
import stochastic_solver
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import sys, lmfit
from scipy import constants
from mpl_toolkits.axes_grid1 import make_axes_locatable
sas_models.math.n_integration_cuts = 1
def boltzmann(x,B):
k = constants.k
mu_B = 9.274E-24
T = 298.15
B = B * 1E-3 # [T]
#mu = 7.7E3 # [J/T]
    mu = 5E4  # magnetic moment in units of mu_B (so mu * mu_B is in J/T)
Lan = B * mu*mu_B / k / T
print(Lan)
#sincos = np.sqrt(1-x**2)
nominator = Lan*np.exp(Lan*np.cos(x))
#nominator = Lan*np.exp(Lan*x)
denominator = np.exp(-Lan)*(np.exp(2.*Lan)-1.)
if B > 0:
p_langevin = nominator/denominator
else:
p_langevin = np.ones(len(x))
return p_langevin * np.sin(x)#/4/np.pi**2
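# boltzmann() returns the Langevin (Boltzmann) orientation distribution
# Lan * exp(Lan * cos(theta)) / (exp(Lan) - exp(-Lan)) weighted by the
# solid-angle factor sin(theta), where Lan = mu * mu_B * B / (k * T) is the
# Langevin parameter printed inside the function.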
def calc_langevin_dist(B, Nbins, func):
#theta = np.linspace(70,70,Nbins)/180*np.pi
theta = np.linspace(1E-3,np.pi-1E-3,Nbins)
d_theta = theta[1] - theta[0]
P = func(theta, B)
P = P/max(P)
return theta, P
def write_pixels(x,y,z):
z = z.flatten()
with open('outfile', 'w') as f:
counter = -1
for i in x:
for j in y:
counter += 1
if counter >= len(z):
break
f.writelines(str(i) + '\t' + str(j) + '\t' + str(z[counter]) + '\n')
#B_vals = [0,3,5,10,20,30,45,55,80,120,520]
#B_vals = [0,5,20,55,120,520]
B_vals = [0]
#A_vals = [0,20]
#A_vals = [0]
#fig = plt.figure()
#ax = fig.add_subplot(111)
for B in B_vals:
I0 = 0.0008
L = 4900 # Angstrom
#R = 100 # Angstrom
R = 100
beta = 0 # Rotation of magnetic field
SLDcylinder = 64.4e-6
SLDmatrix = 9.5e-6
q_yz_angle = 0.0
#sigL = 0.15
#B = 0
A = 0
# savename = str(B) + '_mT_' + str(sigL).split('.')[-1] + 'sigL_' + str(R) + '_R_' + str(int(L)) + '_L' + '_mu_E4'
if 0:
theta_array, Prob_theta = calc_langevin_dist(B, 130, boltzmann)
phi_array = np.linspace(0,2*np.pi, 50)
Prob_phi = np.ones(len(phi_array))
if 1:
theta_cos, phi_array, counts = stochastic_solver.do_stochastic(B, A)
theta_array = np.arccos(theta_cos)
Prob_theta, Prob_phi = stochastic_solver.get_dist(counts)
#theta_array = np.linspace(1,0, 99)*np.pi
Prob_theta = Prob_theta*np.sin(theta_array)
Prob_theta = Prob_theta/max(Prob_theta)
Prob_phi = Prob_phi/max(Prob_phi)
stochastic_solver.plot_2d(theta_cos, phi_array, counts)
theta_array_analy, Prob_theta_analy = calc_langevin_dist(B, 50, boltzmann)
#fig = plt.figure()
fig, ax1 = plt.subplots()
ax2 = ax1.twiny()
ax1.set_xlabel(r'Inclination Angle $\theta$')
ax1.set_ylabel(r'Probability')
ax1.plot(theta_array_analy, Prob_theta_analy, "k-", label='Analytic')
ax1.plot(theta_array, Prob_theta, "r.", label='Stochastic')
ax2.plot(phi_array, Prob_phi, "b.", label='Phi')
#plt.plot(theta_array1, Prob_theta1, "r.", label='Stochastic')
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
plt.legend(h1+h2, l1+l2, loc='best')
#ax.legend(loc='best', frameon=False)
# test 1, I over q along line at angle q_yz_angle
if 0:
sigL = 0.15
savename = str(B) + '_mT_' + str(sigL).split('.')[-1] + 'sigL_' + str(R) + '_R_' + str(int(L)) + '_L' + '_mu_E4'
q = np.linspace(1e-3, 1E-1, 200)
#phi_array = np.linspace(0,2,49)*np.pi
Imodel_0 = I0 * sas_models.cylinder.formfactor(q, L, R, beta,\
SLDcylinder, SLDmatrix,\
theta_array, Prob_theta, 0,\
sigL, phi_array, Prob_phi)
Imodel_90 = I0 * sas_models.cylinder.formfactor(q, L, R, beta,\
SLDcylinder, SLDmatrix,\
theta_array, Prob_theta, 90,\
sigL, phi_array, Prob_phi)
#break
#fig = plt.figure()
#ax = fig.add_subplot(111)
labelname = str(B)
ax.plot(q, Imodel_90, label='Cylinder qz ' + labelname)
ax.plot(q, Imodel_0, label='Cylinder qy ' + labelname)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('$q \, / \, \mathrm{nm^{-1} }$')
ax.set_ylabel('$I \, / \, \mathrm{a.u.}$')
plt.legend(loc='best', frameon=False)
savename = str(B) + '_mT_' + str(sigL).split('.')[-1] + 'sigL_' + str(R) + '_R_' + str(L) + '_L_p'
plt.savefig('./Bericht_data_cyl/I_vs_q_sim/' + savename + '.png')
np.savetxt('./Bericht_data_cyl/I_vs_q_sim/' + savename + '.txt',\
np.transpose([q, Imodel_0, Imodel_90]))
plt.close()
def get_custom_cmap():
def make_colormap(seq):
"""Return a LinearSegmentedColormap
        seq: a sequence of (position, r, g, b) tuples. The positions should be
        increasing and lie in the interval [0, 1]; r, g and b are floats in [0, 1].
"""
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
pos, r, g, b = item
cdict['red'].append([pos, r, r])
cdict['green'].append([pos, g, g])
cdict['blue'].append([pos, b, b])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
#Nice Coloring:
c = mcolors.ColorConverter().to_rgb
custom_colors = [(0, 0, 0, 0),\
(0.18, 0.05, 0.05, 0.2),\
(0.28, 0, 0, 1),\
(0.4, 0.7, 0.85, 0.9),\
(0.45, 0, 0.75, 0),\
(0.6, 1, 1, 0),\
(0.75, 1, 0, 0),\
(0.92 , 0.6, 0.6, 0.6),\
(1 , 0.95, 0.95, 0.95)]
custom_cmap = make_colormap(custom_colors)
custom_cmap.set_bad(color='black')
return custom_cmap
# test 2:
if 0:
sigL = 0.15
savename = str(B) + '_mT_' + str(sigL).split('.')[-1] + 'sigL_' + str(R) + '_R_' + str(int(L)) + '_L' + '_mu_E4'
pixel_number = 100
qy_min = -0.0055
qy_max = 0.0055
qz_min = -0.0055
qz_max = 0.0055
qy = np.linspace(qy_min, qy_max, pixel_number)
qz = np.linspace(qz_min, qz_max, pixel_number)
Imodel = I0*sas_models.cylinder.get_2dimage_precalculated_theta_distribution(qy, qz, L, R,\
beta, SLDcylinder, SLDmatrix, theta_array, Prob_theta,\
sigL, phi_array, Prob_phi)
fig = plt.figure()
ax = fig.add_subplot(111)
cmap = get_custom_cmap()
pcm = ax.pcolormesh(qy, qz, Imodel.T, norm=mcolors.LogNorm(), cmap=cmap, vmin = 1e4, vmax = 1e0)
ax.set_xlabel('$q_y \, / \, \AA^{-1}$')
ax.set_ylabel('$q_z \, / \, \AA^{-1}$')
ax.set_xlim(min(qy), max(qy))
ax.set_ylim(min(qz), max(qz))
ax.set_aspect('equal', adjustable='box')
divider3 = make_axes_locatable(ax)
ax.grid()
cax = divider3.append_axes('right', size="5%", pad=0.05)
cbar = fig.colorbar(pcm, cax=cax)
fig.tight_layout()
fig.savefig('./Bericht_data_cyl/2D_sim/' + savename + '.png')
data1=np.vstack((qy, qz, Imodel.T)) #.T
np.savetxt('./Bericht_data_cyl/2D_sim/' + savename + '.txt', data1, delimiter = ' ')
# #write_pixels(qy ,qz, Imodel)
        plt.close()
# test 3, I over azimuth angle at qval value
if 0:
sigL = 0.15
savename = str(B) + '_mT_' + str(sigL).split('.')[-1] + 'sigL_' + str(R) + '_R_' + str(int(L)) + '_L' + '_mu_E4'
qval = 0.008
#fig = plt.figure()
#ax = fig.add_subplot(111)
angles = np.linspace(0, 360, 180)
#phi_array = phi_array[50:]
#Prob_phi = Prob_phi[50:]
Imodel = I0*sas_models.cylinder.formfactor_azimuth(angles, qval, L, R, beta,\
SLDcylinder, SLDmatrix, theta_array, Prob_theta,\
sigL, phi_array, Prob_phi)#, Prob_phi)
labelname = str(B) + '_mT'
pcm = ax.plot(angles, Imodel,label=labelname)
ax.set_xlabel(r'$\vartheta \, / \, ^\circ$')
ax.set_ylabel('$\mathit{I} \, / \, a.u.$')
ax.set_xlim(min(angles), max(angles))
ax.legend(loc='best', frameon=False)
fig.tight_layout()
fig.savefig('./Bericht_data_cyl/I_vs_angle_sim/' + savename + '.png')
np.savetxt('./Bericht_data_cyl/I_vs_angle_sim/' + savename + '.txt',\
np.transpose([angles, Imodel]))
plt.close()
plt.show()
# Export data
#data1=np.vstack((qy, qz, Imodel.T)) #.T
#np.savetxt('no_dist_test.dat', data1, delimiter = ' ')
| gpl-3.0 |
francesco-mannella/neunet-basics | course/perceptron_training_animation.py | 1 | 4422 | # The matplotlib object to do animations
from pylab import *
from utils import *
from matplotlib import animation
# This grid allows laying out subplots in a more
# flexible way
import matplotlib.gridspec as gridspec
class AnimatePerceptron :
def __init__(self, input_store, output_store, label_store):
self.input_store = input_store
self.output_store = output_store
self.label_store = label_store
self.m, self.trials = self.output_store.shape
# Initialize the figure for the animation
    # timestep : int    The index of the stored
    #                   input/target pattern to plot
    # returns : tuple   The figure and the three
    #                   plotting objects to render
def init_fig(self, timestep = 0, plot_error = True) :
# This is the input digit
input_digit = self.input_store[:,timestep]
# This is the output of the network
# (10-elements vector)
output = self.output_store[:,timestep]
# This is the target of the network
# (10-elements vector)
target = self.label_store[:,timestep]
m = self.output_store.shape[0]
trials = self.output_store.shape[1]
# Init the grid and the figure
gs = gridspec.GridSpec(8, 24)
self.fig = figure(figsize=(10, 4.5))
#-------------------------------------------------
# Plot 1 - plot the input digit
# Create subplot
ax1 = self.fig.add_subplot(gs[:4,:4])
title("input")
# Create the imshow and save the handler
self.im_input = ax1.imshow(to_mat(input_digit),
interpolation = 'none',
aspect = 'auto',
cmap = cm.binary)
# Further plot specs
axis('off')
#-------------------------------------------------
# Plot 2 - plot the current state of the network
# Create subplot
ax2 = self.fig.add_subplot(gs[:4,6:])
title("output vector")
# Create the imshow and save the handler
self.im_output = ax2.bar(arange(m), output,
width=.7, color="blue", align="center")
self.im_target = ax2.bar(arange(m)-.5, target,
width=.3, color="red", align="center")
# Only bottom axes
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax2.set_xticks(arange(m)+.5)
ax2.set_xticklabels(arange(m))
ax2.set_yticks([])
# Further plot specs
ylim([-.5,1])
# Return the handlers
return (self.fig, self.im_input,
self.im_output, self.im_target)
# Updates images at each frame of the animation
# data : list of tuples Each row contains the
# arguments of update for
# a frame
# returns : tuple The handlers of the
# images
def update(self, data) :
# Unpack data
input_,output_,target_ = data
# Update data of plot1, 2 and 3
self.im_input.set_array(to_mat(input_))
# set data of the 2nd plot
# (change height of each bar)
for rect, h in zip(self.im_output, output_ ) :
rect.set_height(h)
        # set data of the 3rd plot
# (change height of each bar)
for rect, h in zip(self.im_target, target_ ) :
rect.set_height(h)
# Return the handlers
return (self.im_input, self.im_output,
self.im_target)
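    # Example (added for clarity): each element of the `data` sequence passed
    # to update() is a tuple
    #     (self.input_store[:, t], self.output_store[:, t], self.label_store[:, t])
    # exactly as built in training_animation() below.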
def training_animation(self):
# Our function to render videos inline
# (see anim_to_html.py)
import anim_to_html as AH
# The first pattern
# initialize the figure
self.init_fig()
# We use a generator (see https://goo.gl/ekU3u2) to Build
# the sequence of update arguments for the three plots.
data = [ ( self.input_store[:,t], self.output_store[:,t], self.label_store[:,t] )
for t in xrange(self.trials) ]
# Create and render the animation
anim = animation.FuncAnimation(self.fig, self.update, data, blit=True)
return AH.display_animation(anim, filename="mnist-perceptron-training.gif")
| mit |
terkkila/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
MaartenGr/BERTopic | tests/test_other.py | 1 | 1097 | """
Unit Tests of uncategorized functions/features
These tests are those that could not easily be categorized
into one of the other test_XXX.py files.
"""
from sklearn.datasets import fetch_20newsgroups
from bertopic import BERTopic
newsgroup_docs = fetch_20newsgroups(subset='all')['data'][:1000]
def test_load_save_model():
""" Check if the model is correctly saved """
model = BERTopic(language="Dutch", embedding_model=None)
model.save("test")
loaded_model = BERTopic.load("test")
assert type(model) == type(loaded_model)
assert model.language == loaded_model.language
assert model.embedding_model == loaded_model.embedding_model
assert model.top_n_words == loaded_model.top_n_words
def test_get_params():
""" Test if parameters could be extracted """
model = BERTopic()
params = model.get_params()
assert not params["embedding_model"]
assert not params["low_memory"]
assert not params["nr_topics"]
assert params["n_gram_range"] == (1, 1)
assert params["min_topic_size"] == 10
assert params["language"] == 'english'
| mit |
rrohan/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 11 | 39569 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2*ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=1,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
        assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0, max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
abitofalchemy/ScientificImpactPrediction | NetAnalysis/proj_json_graph.py | 1 | 4984 | import json, io
import pandas as pd
import numpy as np
import string, re
import networkx as nx
from pprint import pprint
from collections import defaultdict
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
global clust, df
verbose = False
def drop_duplicates(doclist):
df = pd.DataFrame(doclist)
df = df.drop_duplicates()
return df
def jacc_coeff(words_set_a, words_set_b):
if len(words_set_a) <1 or len(words_set_b) < 1:
return
a = frozenset((w for w in words_set_a))
b = frozenset((w for w in words_set_b))
jaccardcoefficent = (float(len(a & b)) / float(len (a|b)))
return jaccardcoefficent
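# Worked example (added for clarity; the token lists are made up):
#     a = ['graph', 'isomorphism', 'babai']
#     b = ['babai', 'isomorphism', 'proof']
# The intersection holds 2 tokens and the union 4, so
# jacc_coeff(a, b) == 2.0 / 4.0 == 0.5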
def average_jacc_dist(ref_doc, to_docs_arr):
jac_vec = []
for tid in to_docs_arr:
#print df['twdoc'][df.index==int(tid)].values[0]
jac_vec.append(jacc_coeff(ref_doc,df['twdoc'][df.index==int(tid)].values[0]))
return np.mean(jac_vec)
def cluster(tid, doc):
jacc_dist_vec = []
for sid in seed_id_lst:
centroid_arr = clust[sid]
if len(centroid_arr) > 1:
jacc_dist_vec.append(average_jacc_dist(doc,centroid_arr))
else:
#print df['twdoc'][df.index==int(sid)].values[0]
seed_doc = df['twdoc'][df.index==int(sid)].values[0]
jacc_dist_vec.append(jacc_coeff(doc,seed_doc))
max_jacc_val = max(jacc_dist_vec)
sid= seed_id_lst[jacc_dist_vec.index(max_jacc_val)]
return sid
# << Begin >>
#given_json_file = "Tweets.json"
given_text_file = "../../data_collection/tweets.json"
#given_text_file = "datasets/isomorphism_quasipolynomial.json"
given_text_file = "../../data_collection/tweets.json"
given_text_file = "../datasets/iso_morph_laszlo_babai_apollo.json"
# reads in the JSON file
if 0:
with open(given_text_file) as f:
for line in f:
a = json.dumps(json.JSONEncoder().encode(line.strip('\n')))
print type(json.loads(a))
b = json.loads(line)
print type(b)
break
data_json = io.open(given_text_file, mode='r', encoding='utf-8').read()
raw_tweets=data_json.splitlines()
print 'read json lines into a list.'
docs =[]
tids =[]
links = {}
tweet_d = dict()
ucitesc = {}
k = 0
for d in raw_tweets:
k +=1
if not len(d): continue
tw_d = json.loads(d.rstrip('\r\n'))
tt = str(tw_d[u'text'].encode("utf-8"))
print tt
lnks = re.findall(r'(https?://\S+)', tt) # re.search("(?P<url>https?://[^\s]+)", tt)
if len(lnks): # dropping links from tweet string
links[tw_d['id']] = lnks
for l in lnks:
print '>',tt
print 'removing', l
            tt = tt.replace(str(l), '')  # remove the URL substring (str.strip would only trim characters)
print ':',tt
if not len(tt): # continue if len is 0
continue
tt = tt.translate(None, string.punctuation)
tt = tt.split() # tokenize
tokens = [word for word in tt if word not in stopwords.words('english')]
docs.append( tokens )
tweet_d[tw_d[u'id']] = tokens # a dict with tweet id as the key and the filtered words as the values
tids.append( tw_d[u'id'] )
ucitesc[tw_d['user']['id']] = tw_d['id']
# ##
#pprint (docs)
# fwvec: vec of filtered tokens
# tweet_d with keys as the tweet id and the filtered tokens as values
# Now, do we cluster or do we build a graph?
#
# docs tokenized tweets
# Dataset stats
print '~'*20
print '# of tweets', len(docs)
print '# of linkss', len(links)
print '# size of d', len(tweet_d)
doc_vecs = drop_duplicates(docs).values
print np.shape(doc_vecs)
print '~'*20
#for doc in docs:
# print doc
# break
#pprint (ucitesc)
#print len(ucitesc)
#print
#
#for k,v in ucitesc.items():
# if v in links.keys():
# print tweet_d[v]
#
# print links[v]
# break
# # #
g = nx.Graph()
for k,v in ucitesc.items():
g.add_edge(k,v)
print g.number_of_nodes()
print g.number_of_edges()
print nx.is_connected(g)
print 'NC',nx.number_connected_components(g)
#print list(nx.connected_component_subgraphs(g)) #max(nx.connected_component_subgraphs(g), key=len))
nx.draw_networkx(g,with_labels=False,font_size=8,node_size=20, alpha=0.75)
plt.savefig('output.pdf')
exit()
ix = tids.pop()
di = tweet_d.pop(ix)
Clusters = defaultdict(list)
Clusters[0] = {ix: di}
i = 0
while tids:
jx = tids.pop()
    dj = tweet_d.pop(jx)
jv_for_cluster = []
jv = 0
for c in Clusters.values():
# Jaccard distance 1 - jaccard index
jacc_vec = [jacc_coeff(dj, ixd[1]) for ixd in c.items()]
jv = [1.0-x for x in jacc_vec]
if len(jv)>1:
print np.mean(jv)
jv_for_cluster.append([np.mean(jv)])
else:
jv_for_cluster.append(jv)
min_jv = np.min(jv_for_cluster)
if (len(Clusters) <= 25) and (min_jv > 0.):
Clusters[len(Clusters)] = {jx: dj}
else: #if (len(Clusters) <= 25) and (jv > 0.):
loci = jv_for_cluster.index(np.min(jv_for_cluster))
elems = Clusters[loci]
elems[jx] = dj
Clusters[loci] = elems
with open('kmeans_tw_clust.tsv', 'w') as f:
for k,v in Clusters.items():
[f.write('{0}\t{1}'.format(k+1, value)) for value in v.keys()]
f.write('\n')
| mit |
Fireblend/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
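# These tests exercise the old `clf.transform(X, threshold)` shortcut on fitted
# linear models: with an L1 penalty some coefficients are driven to zero, so
# the transformed matrix should keep fewer features (SGD only guarantees "no
# more" features).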
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
dwhswenson/contact_map | contact_map/contact_count.py | 1 | 14734 | import collections
import scipy
import numpy as np
import pandas as pd
import warnings
from .plot_utils import ranged_colorbar, make_x_y_ranges, is_cmap_diverging
# matplotlib is technically optional, but required for plotting
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
HAS_MATPLOTLIB = False
else:
HAS_MATPLOTLIB = True
try:
import networkx as nx
except ImportError:
HAS_NETWORKX = False
else:
HAS_NETWORKX = True
# pandas 0.25 not available on py27; can drop this when we drop py27
_PD_VERSION = tuple(int(x) for x in pd.__version__.split('.')[:2])
def _colorbar(with_colorbar, cmap_f, norm, min_val, ax=None):
if with_colorbar is False:
return None
elif with_colorbar is True:
cbmin = np.floor(min_val) # [-1.0..0.0] => -1; [0.0..1.0] => 0
cbmax = 1.0
cb = ranged_colorbar(cmap_f, norm, cbmin, cbmax, ax=ax)
# leave open other inputs to be parsed later (like tuples)
return cb
# TODO: remove following: this is a monkeypatch for a bug in pandas
# see: https://github.com/pandas-dev/pandas/issues/29814
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
def _patch_from_spmatrix(cls, data): # -no-cov-
length, ncol = data.shape
if ncol != 1:
raise ValueError("'data' must have a single column, not '{}'".format(ncol))
# our sparse index classes require that the positions be strictly
# increasing. So we need to sort loc, and arr accordingly.
arr = data.data
#idx, _ = data.nonzero()
idx = data.indices
loc = np.argsort(idx)
arr = arr.take(loc)
idx.sort()
zero = np.array(0, dtype=arr.dtype).item()
dtype = pd.SparseDtype(arr.dtype, zero)
index = IntIndex(length, idx)
return cls._simple_new(arr, index, dtype)
if _PD_VERSION >= (0, 25):
pd.core.arrays.SparseArray.from_spmatrix = classmethod(_patch_from_spmatrix)
# TODO: this is the end of what to remove when pandas is fixed
def _get_total_counter_range(counter):
numbers = [i for key in counter.keys() for i in key]
if len(numbers) == 0:
return (0, 0)
return (min(numbers), max(numbers)+1)
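# _get_total_counter_range returns a half-open span, (min index, max index + 1),
# so the result can be used directly for matrix sizes and plot bounds.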
class ContactCount(object):
"""Return object when dealing with contacts (residue or atom).
This contains all the information about the contacts of a given type.
This information can be represented several ways. One is as a list of
contact pairs, each associated with the fraction of time the contact
occurs. Another is as a matrix, where the rows and columns label the
pair number, and the value is the fraction of time. This class provides
several methods to get different representations of this data for
further analysis.
In general, instances of this class shouldn't be created by a user using
``__init__``; instead, they will be returned by other methods. So users
will often need to use this object for analysis.
Parameters
----------
counter : :class:`collections.Counter`
the counter describing the count of how often the contact occurred;
key is a frozenset of a pair of numbers (identifying the
atoms/residues); value is the raw count of the number of times it
occurred
object_f : callable
method to obtain the object associated with the number used in
``counter``; typically :meth:`mdtraj.Topology.residue` or
:meth:`mdtraj.Topology.atom`.
n_x : int, tuple(start, end), optional
range of objects in the x direction (used in plotting)
        Default tries to plot the fewest symmetric points.
n_y : int, tuple(start, end), optional
range of objects in the y direction (used in plotting)
        Default tries to show the fewest symmetric points.
max_size : int, optional
maximum size of the count
(used to determine the shape of output matrices and dataframes)
"""
def __init__(self, counter, object_f, n_x=None, n_y=None, max_size=None):
self._counter = counter
self._object_f = object_f
self.total_range = _get_total_counter_range(counter)
self.n_x, self.n_y = make_x_y_ranges(n_x, n_y, counter)
if max_size is None:
self.max_size = max([self.total_range[-1],
self.n_x.max,
self.n_y.max])
else:
self.max_size = max_size
@property
def counter(self):
"""
:class:`collections.Counter` :
keys use index number; count is contact occurrences
"""
return self._counter
@property
def sparse_matrix(self):
"""
:class:`scipy.sparse.dok.dok_matrix` :
sparse matrix representation of contacts
Rows/columns correspond to indices and the values correspond to
the count
"""
max_size = self.max_size
mtx = scipy.sparse.dok_matrix((max_size, max_size))
for (k, v) in self._counter.items():
key = list(k)
mtx[key[0], key[1]] = v
mtx[key[1], key[0]] = v
return mtx
@property
def df(self):
"""
:class:`pandas.SparseDataFrame` :
DataFrame representation of the contact matrix
Rows/columns correspond to indices and the values correspond to
the count
"""
mtx = self.sparse_matrix
index = list(range(self.max_size))
columns = list(range(self.max_size))
if _PD_VERSION < (0, 25): # py27 only -no-cov-
mtx = mtx.tocoo()
return pd.SparseDataFrame(mtx, index=index, columns=columns)
df = pd.DataFrame.sparse.from_spmatrix(mtx, index=index,
columns=columns)
# note: I think we can always use float here for dtype; but in
# principle maybe we need to inspect and get the internal type?
# Problem is, pandas technically stores a different dtype for each
# column.
df = df.astype(pd.SparseDtype("float", np.nan))
return df
def to_networkx(self, weighted=True, as_index=False, graph=None):
"""Graph representation of contacts (requires networkx)
Parameters
----------
weighted : bool
whether to use the frequencies as edge weights in the graph,
default True
as_index : bool
if True, the nodes in the graph are integer indices; if False
(default), the nodes are mdtraj.topology objects (Atom/Residue)
graph : networkx.Graph or None
if provided, edges are added to an existing graph
Returns
-------
networkx.Graph :
graph representation of the contact matrix
"""
if not HAS_NETWORKX: # -no-cov-
raise RuntimeError("Error importing networkx")
graph = nx.Graph() if graph is None else graph
for pair, value in self.counter.items():
if not as_index:
pair = map(self._object_f, pair)
attr_dict = {'weight': value} if weighted else {}
graph.add_edge(*pair, **attr_dict)
return graph
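    # Example sketch (assumes networkx is available):
    #
    #     g = counts.to_networkx()
    #     busiest = sorted(g.degree(weight='weight'), key=lambda kv: -kv[1])[:5]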
def _check_number_of_pixels(self, figure):
"""
This checks to see if the number of pixels in the figure is high enough
        to accurately represent the contact map. It raises a RuntimeWarning
if this is not the case.
Parameters
----------
figure: :class:`matplotlib.Figure`
matplotlib figure to compare the amount of pixels from
"""
        # Get dpi, and total pixel width and pixel height
dpi = figure.get_dpi()
figwidth = figure.get_figwidth()
figheight = figure.get_figheight()
xpixels = dpi*figwidth
ypixels = dpi*figheight
# Check if every value has a pixel
if (xpixels/self.n_x.range_length < 1 or
ypixels/self.n_y.range_length < 1):
msg = ("The number of pixels in the figure is insufficient to show"
" all the contacts.\n Please save this as a vector image "
"(such as a PDF) to view the correct result.\n Another "
"option is to increase the 'dpi' (currently: "+str(dpi)+"),"
" or the 'figsize' (currently: " + str((figwidth,
figheight)) +
").\n Recommended minimum amount of pixels = "
+ str((self.n_x.range_length,
self.n_y.range_length))
+ " (width, height).")
warnings.warn(msg, RuntimeWarning)
def plot(self, cmap='seismic', diverging_cmap=None, with_colorbar=True,
**kwargs):
"""
Plot contact matrix (requires matplotlib)
Parameters
----------
cmap : str
color map name, default 'seismic'
diverging_cmap : bool
Whether the given color map is treated as diverging (if
``True``) or sequential (if False). If a color map is diverging
and all data is positive, only the upper half of the color map
is used. Default (None) will give correct results if ``cmap`` is
the string name of a known sequential or diverging matplotlib
color map and will treat as sequential if unknown.
with_colorbar: bool
Whether to include a color bar legend.
**kwargs
All additional keyword arguments to be passed to the
:func:`matplotlib.pyplot.subplots` call
Returns
-------
fig : :class:`matplotlib.Figure`
matplotlib figure object for this plot
ax : :class:`matplotlib.Axes`
matplotlib axes object for this plot
"""
if not HAS_MATPLOTLIB: # pragma: no cover
raise RuntimeError("Error importing matplotlib")
fig, ax = plt.subplots(**kwargs)
# Check the number of pixels of the figure
self._check_number_of_pixels(fig)
self.plot_axes(ax=ax, cmap=cmap, diverging_cmap=diverging_cmap,
with_colorbar=with_colorbar)
return (fig, ax)
def plot_axes(self, ax, cmap='seismic', diverging_cmap=None,
with_colorbar=True):
"""
Plot contact matrix on a matplotlib.axes
Parameters
----------
ax : matplotlib.axes
axes to plot the contact matrix on
cmap : str
color map name, default 'seismic'
diverging_cmap : bool
If True, color map interpolation is from -1.0 to 1.0; allowing
diverging color maps to be used for contact maps and contact
differences. If false, the range is from 0 to 1.0. Default value
of None selects a value based on the value of cmap, treating as
False for unknown color maps.
with_colorbar : bool
If a colorbar is added to the axes
"""
if diverging_cmap is None:
diverging_cmap = is_cmap_diverging(cmap)
vmin, vmax = (-1, 1) if diverging_cmap else (0, 1)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
cmap_f = plt.get_cmap(cmap)
ax.axis([self.n_x.min, self.n_x.max, self.n_y.min, self.n_y.max])
ax.set_facecolor(cmap_f(norm(0.0)))
min_val = 0.0
for (pair, value) in self.counter.items():
if value < min_val:
min_val = value
pair_list = list(pair)
patch_0 = matplotlib.patches.Rectangle(
pair_list, 1, 1,
facecolor=cmap_f(norm(value)),
linewidth=0
)
patch_1 = matplotlib.patches.Rectangle(
(pair_list[1], pair_list[0]), 1, 1,
facecolor=cmap_f(norm(value)),
linewidth=0
)
ax.add_patch(patch_0)
ax.add_patch(patch_1)
_colorbar(with_colorbar, cmap_f, norm, min_val, ax=ax)
def most_common(self, obj=None):
"""
Most common values (ordered) with object as keys.
This uses the objects for the contact pair (typically MDTraj
``Atom`` or ``Residue`` objects), instead of numeric indices. This
is more readable and can be easily used for further manipulation.
Parameters
----------
obj : MDTraj Atom or Residue
if given, the return value only has entries including this
object (allowing one to, for example, get the most common
contacts with a specific residue)
Returns
-------
list :
the most common contacts in order. If the list is ``l``, then
each element ``l[e]`` is a tuple with two parts: ``l[e][0]`` is
the key, which is a pair of Atom or Residue objects, and
``l[e][1]`` is the count of how often that contact occurred.
See also
--------
most_common_idx : same thing, using index numbers as key
"""
if obj is None:
result = [
([self._object_f(idx) for idx in common[0]], common[1])
for common in self.most_common_idx()
]
else:
obj_idx = obj.index
result = [
([self._object_f(idx) for idx in common[0]], common[1])
for common in self.most_common_idx()
if obj_idx in common[0]
]
return result
def most_common_idx(self):
"""
Most common values (ordered) with indices as keys.
Returns
-------
list :
            the most common contacts in order. If the list is ``l``,
then each element ``l[e]`` consists of two parts: ``l[e][0]`` is
a pair of integers, representing the indices of the objects
associated with the contact, and ``l[e][1]`` is the count of how
often that contact occurred
See also
--------
most_common : same thing, using objects as key
"""
return self._counter.most_common()
def filter(self, idx):
"""New ContactCount filtered to idx.
Returns a new ContactCount with the only the counter keys/values
where both the keys are in idx
"""
dct = {k: v for k, v in self._counter.items()
if all([i in idx for i in k])}
new_count = collections.Counter()
new_count.update(dct)
return ContactCount(new_count, self._object_f, self.n_x, self.n_y)
| lgpl-2.1 |
seadsystem/Backend | Analysis and Classification/Analysis/Code/Vince's_Code/Time_Error_Analysis/Time_Analysis.py | 1 | 6656 | #/usr/bin/python
##
# Analysis.py
# Author: Vincent Steffens
# Email: [email protected]
# Date: 7 November 2014
#
# This script is intended to perform exploratory analysis on amperage
# data provided by the SEAD plug. This script takes as input one
# filename in the current directory.
#
# =================================================================== #
# Outline #
# =================================================================== #
#
# Section I: Read and Prepare the Data
#
# 1. Open the file for reading
# 2. Read the lines of the file into two arrays, one for amperage
# data and one for time data.
# 3. Scale the amperage data to produce an array of milliamp values.
# 4. Subtract the earliest time from each timestamp to produce a
# timescale somewhat more intellectually manageable.
# 5. Use fft to produce a numerical spectrogram; a 2-D array in which
# one dimension is time and the other, frequency. The elements are
# intensity values.
#
# Section II: Perform Spectral Analysis
#
# We're not just looking to take the FT. We need a power spectrum,
# which is defined as
# power_spectrum(f(t)) = |FT(f(t)|^2
#    power_spectrum(f(t)) = |FT(f(t))|^2
# Section III: Extract Statistical Information
#
# 1. For each frequency bin, find the mean and standard deviation.
# 2. Produce a "mean power spectrum", and visualize this.
# -Note: This is a good stopping point for this week
#
# Section IV: Produce a Signature
# 1. Pick peaks, find mean and standard deviation for frequency
# domain.
# 2. Construct an ADT object that holds these values.
#
#
# =================================================================== #
# Options #
# An option must be given as the first argument. #
# =================================================================== #
#
# -d: Debug mode
# -f: Fragmented data mode
#
# =================================================================== #
# Return Values #
# =================================================================== #
#
# -1: Too few arguments
# -2: Too many arguments
# -3: Possible misplaced option
# -4: File does not exist!
#
# =================================================================== #
# Questions #
# =================================================================== #
#
# Questions relevant to the data
# 1. How accurate is the sampling rate? What is its deviation?
# 2. What's a good number of bins for the FFT?
#
# =================================================================== #
# TO DO #
# =================================================================== #
#
# 1. Enable the program to handle any number of arguments
##
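# A minimal sketch of the power-spectrum step outlined above (not part of the
# original pipeline; `sample_spacing_s` is a hypothetical name for the mean
# sample interval in seconds, derived from the timestamp differences):
#
#    spectrum = np.abs(np.fft.rfft(Currents))**2
#    freqs = np.fft.rfftfreq(len(Currents), d=sample_spacing_s)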
#For numerical analysis
import numpy as np
#For visualization
import matplotlib.pyplot as pyp
#For manipulating command-line arguments
import sys
#For handling files
import os.path as op
#For using regular expressions
import re
#Check for proper number of arguments, die if necessary
#max_args should be the sum of: 1 for program name, the number of
#options, 1 for the input file name
max_args = 4
if len(sys.argv) < 2:
print "Usage Message A: <program name> [option] <input file name>"
sys.exit(-1)
if len(sys.argv) > max_args:
print "Usage Message B: <program name> [option] <input file name>"
sys.exit(-2)
#Check for options, set flags, die if necessary
debug = 0
fragmented_data = 0
flags_set = 0
flag_regex = re.compile('([-][df])')
for i in xrange(1, len(sys.argv) - 1):
option = flag_regex.match(sys.argv[i])
if option == None:
print "Usage Message C: <program name> [option] <input file name>"
sys.exit(-3)
if option.group() == '-d':
debug = 1
flags_set = flags_set + 1
elif option.group() == '-f':
fragmented_data = 1
flags_set = flags_set + 1
#Loop through the arguments (for my own benefit)
if debug == 1:
for i in xrange(0, len(sys.argv)):
print "Argument ", i, ": ", sys.argv[i]
#Try to open source file for reading
filename = sys.argv[1 + flags_set]
if op.isfile(filename):
with open(filename) as f:
content = [x.strip('\n') for x in f.readlines()]
else:
print "Analysis: file does not exist: ", filename
sys.exit(-4)
#Parse the strings and put each datum into its own list
Currents = []
Times = []
regexpr = re.compile('\A([-]*[0-9][0-9]*)[,]([0-9][0-9]*)')
for i in xrange(0, len(content)):
result = regexpr.match(content[i])
Currents.append(result.group(1))
Times.append(result.group(2))
#Convert time since Unix epoch to intervals in milliseconds
#Convert currents to milliamps
first = Times[0]
Times = [ int(x) - int(Times[0]) for x in Times ]
Currents = [ 27*float(x)/1000 for x in Currents ]
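#Note: the 27/1000 factor rescales the raw readings to milliamps (per the
#comment above); the later `*= 10**-6` conversion to seconds implies the time
#offsets are effectively treated as microseconds.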
#For looking at the data in columns
if debug == 1:
print "debug marker"
d_current = []
d_time = []
#take first-order finite difference
for i in xrange(0, len(Currents) - 1):
d_time.append(Times[i + 1] - Times[i])
d_current.append(Currents[i + 1] - Currents[i])
#Print nicely
skipped_time = 0
spent_time = 0
for i in xrange(0, len(d_current)):
output_string = "Item "
if d_time[i] > 417:
output_string += str(i) + ":\t" + str(d_time[i])
skipped_time += d_time[i]
if d_time[i] <= 417:
spent_time += d_time[i]
# output_string += str(i) + ":\t" + str(d_time[i]) + "\t"
# if Currents[i] >= 0:
# output_string += " "
# output_string += str(Currents[i])
print output_string
print "Skipped time:", skipped_time
print "Spent time:", spent_time
#plot currents
pyp.plot(d_current)
pyp.ylabel("di")
pyp.xlabel("dt")
# pyp.show()
#Find spent time, skipped time, and proportion. Print each.
d_time = []
#take first-order finite difference
for i in xrange(0, len(Times) - 1):
d_time.append(Times[i + 1] - Times[i])
#calculate
skipped_time = spent_time = 0.0
for i in xrange(0, len(d_time)):
output_string = "Item "
if d_time[i] > 417:
skipped_time += d_time[i]
if d_time[i] <= 417:
spent_time += d_time[i]
spent_time *= 10**-6
skipped_time *= 10**-6
total_time = skipped_time + spent_time
proportion_skipped_to_total = float(skipped_time)/total_time
print "Skipped time in seconds:", skipped_time
print "Spent time:", spent_time
print "Elapsed time:", total_time
print "Proportion of skipped to total:", proportion_skipped_to_total
| mit |
robert-digit/superset | tests/celery_tests.py | 8 | 11738 | """Unit tests for Superset Celery worker"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import subprocess
import time
import unittest
from past.builtins import basestring
import pandas as pd
from superset import app, appbuilder, cli, db, dataframe
from superset.models import core as models
from superset.models.helpers import QueryStatus
from superset.models.sql_lab import Query
from superset.security import sync_role_definitions
from superset.sql_parse import SupersetQuery
from .base_tests import SupersetTestCase
BASE_DIR = app.config.get('BASE_DIR')
class CeleryConfig(object):
BROKER_URL = 'sqla+sqlite:///' + app.config.get('SQL_CELERY_DB_FILE_PATH')
CELERY_IMPORTS = ('superset.sql_lab', )
CELERY_RESULT_BACKEND = 'db+sqlite:///' + app.config.get('SQL_CELERY_RESULTS_DB_FILE_PATH')
CELERY_ANNOTATIONS = {'sql_lab.add': {'rate_limit': '10/s'}}
CONCURRENCY = 1
app.config['CELERY_CONFIG'] = CeleryConfig
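# The test configuration points Celery at SQLite-backed broker and result
# databases so the asynchronous round-trip can run without external services.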
class UtilityFunctionTests(SupersetTestCase):
# TODO(bkyryliuk): support more cases in CTA function.
def test_create_table_as(self):
q = SupersetQuery("SELECT * FROM outer_space;")
self.assertEqual(
"CREATE TABLE tmp AS \nSELECT * FROM outer_space",
q.as_create_table("tmp"))
self.assertEqual(
"DROP TABLE IF EXISTS tmp;\n"
"CREATE TABLE tmp AS \nSELECT * FROM outer_space",
q.as_create_table("tmp", overwrite=True))
# now without a semicolon
q = SupersetQuery("SELECT * FROM outer_space")
self.assertEqual(
"CREATE TABLE tmp AS \nSELECT * FROM outer_space",
q.as_create_table("tmp"))
# now a multi-line query
multi_line_query = (
"SELECT * FROM planets WHERE\n"
"Luke_Father = 'Darth Vader'")
q = SupersetQuery(multi_line_query)
self.assertEqual(
"CREATE TABLE tmp AS \nSELECT * FROM planets WHERE\n"
"Luke_Father = 'Darth Vader'",
q.as_create_table("tmp")
)
class CeleryTestCase(SupersetTestCase):
def __init__(self, *args, **kwargs):
super(CeleryTestCase, self).__init__(*args, **kwargs)
self.client = app.test_client()
def get_query_by_name(self, sql):
session = db.session
query = session.query(Query).filter_by(sql=sql).first()
session.close()
return query
def get_query_by_id(self, id):
session = db.session
query = session.query(Query).filter_by(id=id).first()
session.close()
return query
@classmethod
def setUpClass(cls):
try:
os.remove(app.config.get('SQL_CELERY_DB_FILE_PATH'))
except OSError as e:
app.logger.warn(str(e))
try:
os.remove(app.config.get('SQL_CELERY_RESULTS_DB_FILE_PATH'))
except OSError as e:
app.logger.warn(str(e))
sync_role_definitions()
worker_command = BASE_DIR + '/bin/superset worker'
subprocess.Popen(
worker_command, shell=True, stdout=subprocess.PIPE)
admin = appbuilder.sm.find_user('admin')
if not admin:
appbuilder.sm.add_user(
'admin', 'admin', ' user', '[email protected]',
appbuilder.sm.find_role('Admin'),
password='general')
cli.load_examples(load_test_data=True)
@classmethod
def tearDownClass(cls):
subprocess.call(
"ps auxww | grep 'celeryd' | awk '{print $2}' | xargs kill -9",
shell=True
)
subprocess.call(
"ps auxww | grep 'superset worker' | awk '{print $2}' | "
"xargs kill -9",
shell=True
)
def run_sql(self, db_id, sql, client_id, cta='false', tmp_table='tmp',
async='false'):
self.login()
resp = self.client.post(
'/superset/sql_json/',
data=dict(
database_id=db_id,
sql=sql,
async=async,
select_as_cta=cta,
tmp_table_name=tmp_table,
client_id=client_id,
),
)
self.logout()
return json.loads(resp.data.decode('utf-8'))
def test_add_limit_to_the_query(self):
session = db.session
main_db = self.get_main_database(db.session)
eng = main_db.get_sqla_engine()
select_query = "SELECT * FROM outer_space;"
updated_select_query = main_db.wrap_sql_limit(select_query, 100)
# Different DB engines have their own spacing while compiling
# the queries, that's why ' '.join(query.split()) is used.
# In addition some of the engines do not include OFFSET 0.
self.assertTrue(
"SELECT * FROM (SELECT * FROM outer_space;) AS inner_qry "
"LIMIT 100" in ' '.join(updated_select_query.split())
)
select_query_no_semicolon = "SELECT * FROM outer_space"
updated_select_query_no_semicolon = main_db.wrap_sql_limit(
select_query_no_semicolon, 100)
self.assertTrue(
"SELECT * FROM (SELECT * FROM outer_space) AS inner_qry "
"LIMIT 100" in
' '.join(updated_select_query_no_semicolon.split())
)
multi_line_query = (
"SELECT * FROM planets WHERE\n Luke_Father = 'Darth Vader';"
)
updated_multi_line_query = main_db.wrap_sql_limit(multi_line_query, 100)
self.assertTrue(
"SELECT * FROM (SELECT * FROM planets WHERE "
"Luke_Father = 'Darth Vader';) AS inner_qry LIMIT 100" in
' '.join(updated_multi_line_query.split())
)
def test_run_sync_query_dont_exist(self):
main_db = self.get_main_database(db.session)
db_id = main_db.id
sql_dont_exist = 'SELECT name FROM table_dont_exist'
result1 = self.run_sql(db_id, sql_dont_exist, "1", cta='true')
self.assertTrue('error' in result1)
def test_run_sync_query_cta(self):
main_db = self.get_main_database(db.session)
db_id = main_db.id
eng = main_db.get_sqla_engine()
perm_name = 'can_sql_json'
sql_where = (
"SELECT name FROM ab_permission WHERE name='{}'".format(perm_name))
result2 = self.run_sql(
db_id, sql_where, "2", tmp_table='tmp_table_2', cta='true')
self.assertEqual(QueryStatus.SUCCESS, result2['query']['state'])
self.assertEqual([], result2['data'])
self.assertEqual([], result2['columns'])
query2 = self.get_query_by_id(result2['query']['serverId'])
# Check the data in the tmp table.
df2 = pd.read_sql_query(sql=query2.select_sql, con=eng)
data2 = df2.to_dict(orient='records')
self.assertEqual([{'name': perm_name}], data2)
def test_run_sync_query_cta_no_data(self):
main_db = self.get_main_database(db.session)
db_id = main_db.id
sql_empty_result = 'SELECT * FROM ab_user WHERE id=666'
result3 = self.run_sql(
db_id, sql_empty_result, "3", tmp_table='tmp_table_3', cta='true')
self.assertEqual(QueryStatus.SUCCESS, result3['query']['state'])
self.assertEqual([], result3['data'])
self.assertEqual([], result3['columns'])
query3 = self.get_query_by_id(result3['query']['serverId'])
self.assertEqual(QueryStatus.SUCCESS, query3.status)
def test_run_async_query(self):
main_db = self.get_main_database(db.session)
eng = main_db.get_sqla_engine()
sql_where = "SELECT name FROM ab_role WHERE name='Admin'"
result = self.run_sql(
main_db.id, sql_where, "4", async='true', tmp_table='tmp_async_1',
cta='true')
assert result['query']['state'] in (
QueryStatus.PENDING, QueryStatus.RUNNING, QueryStatus.SUCCESS)
time.sleep(1)
query = self.get_query_by_id(result['query']['serverId'])
df = pd.read_sql_query(query.select_sql, con=eng)
self.assertEqual(QueryStatus.SUCCESS, query.status)
self.assertEqual([{'name': 'Admin'}], df.to_dict(orient='records'))
self.assertEqual(QueryStatus.SUCCESS, query.status)
self.assertTrue("FROM tmp_async_1" in query.select_sql)
self.assertTrue("LIMIT 666" in query.select_sql)
self.assertEqual(
"CREATE TABLE tmp_async_1 AS \nSELECT name FROM ab_role "
"WHERE name='Admin'", query.executed_sql)
self.assertEqual(sql_where, query.sql)
self.assertEqual(0, query.rows)
self.assertEqual(666, query.limit)
self.assertEqual(False, query.limit_used)
self.assertEqual(True, query.select_as_cta)
self.assertEqual(True, query.select_as_cta_used)
@staticmethod
def de_unicode_dict(d):
def str_if_basestring(o):
if isinstance(o, basestring):
return str(o)
return o
return {str_if_basestring(k): str_if_basestring(d[k]) for k in d}
@classmethod
def dictify_list_of_dicts(cls, l, k):
return {str(o[k]): cls.de_unicode_dict(o) for o in l}
def test_get_columns(self):
main_db = self.get_main_database(db.session)
df = main_db.get_df("SELECT * FROM multiformat_time_series", None)
cdf = dataframe.SupersetDataFrame(df)
# Making ordering non-deterministic
cols = self.dictify_list_of_dicts(cdf.columns, 'name')
if main_db.sqlalchemy_uri.startswith('sqlite'):
self.assertEqual(self.dictify_list_of_dicts([
{'is_date': True, 'type': 'STRING', 'name': 'ds',
'is_dim': False},
{'is_date': True, 'type': 'STRING', 'name': 'ds2',
'is_dim': False},
{'agg': 'sum', 'is_date': False, 'type': 'INT',
'name': 'epoch_ms', 'is_dim': False},
{'agg': 'sum', 'is_date': False, 'type': 'INT',
'name': 'epoch_s', 'is_dim': False},
{'is_date': True, 'type': 'STRING', 'name': 'string0',
'is_dim': False},
{'is_date': False, 'type': 'STRING',
'name': 'string1', 'is_dim': True},
{'is_date': True, 'type': 'STRING', 'name': 'string2',
'is_dim': False},
{'is_date': False, 'type': 'STRING',
'name': 'string3', 'is_dim': True}], 'name')
, cols
)
else:
self.assertEqual(self.dictify_list_of_dicts([
{'is_date': True, 'type': 'DATETIME', 'name': 'ds',
'is_dim': False},
{'is_date': True, 'type': 'DATETIME',
'name': 'ds2', 'is_dim': False},
{'agg': 'sum', 'is_date': False, 'type': 'INT',
'name': 'epoch_ms', 'is_dim': False},
{'agg': 'sum', 'is_date': False, 'type': 'INT',
'name': 'epoch_s', 'is_dim': False},
{'is_date': True, 'type': 'STRING', 'name': 'string0',
'is_dim': False},
{'is_date': False, 'type': 'STRING',
'name': 'string1', 'is_dim': True},
{'is_date': True, 'type': 'STRING', 'name': 'string2',
'is_dim': False},
{'is_date': False, 'type': 'STRING',
'name': 'string3', 'is_dim': True}], 'name')
, cols
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ocefpaf/iris | lib/iris/tests/unit/plot/test_points.py | 5 | 2370 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.plot.points` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.points(self.cube, coords=("bar", "str_coord"))
self.assertBoundsTickLabels("yaxis")
def test_xaxis_labels(self):
iplt.points(self.cube, coords=("str_coord", "bar"))
self.assertBoundsTickLabels("xaxis")
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(0, 3)
iplt.points(self.cube, coords=("str_coord", "bar"), axes=ax)
plt.close(fig)
self.assertPointsTickLabels("xaxis", ax)
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim(0, 3)
iplt.points(self.cube, coords=("bar", "str_coord"), axes=ax)
plt.close(fig)
self.assertPointsTickLabels("yaxis", ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.points, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord("foo").points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord("bar").points
self.bar_index = np.arange(self.bar.size)
self.data = None
self.dataT = None
self.mpl_patch = self.patch("matplotlib.pyplot.scatter")
self.draw_func = iplt.points
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
kevin-intel/scikit-learn | examples/classification/plot_classification_probability.py | 39 | 3509 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3 class
dataset, and we classify it with a Support Vector classifier, L1 and L2
penalized logistic regression with either a One-Vs-Rest or multinomial setting,
and Gaussian process classification.
Linear SVC is not a probabilistic classifier by default but it has a built-in
calibration option enabled in this example (`probability=True`).
The logistic regression with One-Vs-Rest is not a multiclass classifier out of
the box. As a result it has more trouble in separating class 2 and 3 than the
other estimators.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 10
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers.
classifiers = {
'L1 logistic': LogisticRegression(C=C, penalty='l1',
solver='saga',
multi_class='multinomial',
max_iter=10000),
'L2 logistic (Multinomial)': LogisticRegression(C=C, penalty='l2',
solver='saga',
multi_class='multinomial',
max_iter=10000),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2',
solver='saga',
multi_class='ovr',
max_iter=10000),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'GPC': GaussianProcessClassifier(kernel)
}
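# Every classifier above exposes predict_proba once fitted; for the SVC this
# relies on the probability calibration (Platt scaling with internal
# cross-validation) enabled by probability=True.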
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
accuracy = accuracy_score(y, y_pred)
print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100))
# View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
quheng/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem:
fit a linear classifier on features extracted from the text of the user
messages to guess whether the opinion of the author is positive or
negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
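    # One possible sketch of the TASK steps above (illustrative, not the
    # official solution; parameter values are guesses):
    #
    #     pipeline = Pipeline([
    #         ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    #         ('clf', LinearSVC(C=1000)),
    #     ])
    #     parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    #     grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    #     grid_search.fit(docs_train, y_train)
    #     for params, mean_score, scores in grid_search.grid_scores_:
    #         print("%0.3f (+/-%0.03f) for %r"
    #               % (mean_score, scores.std() * 2, params))
    #     y_predicted = grid_search.predict(docs_test)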
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
toobaz/pandas | pandas/tests/util/test_assert_categorical_equal.py | 2 | 2785 | import pytest
from pandas import Categorical
from pandas.util.testing import assert_categorical_equal
@pytest.mark.parametrize(
"c",
[Categorical([1, 2, 3, 4]), Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4, 5])],
)
def test_categorical_equal(c):
assert_categorical_equal(c, c)
@pytest.mark.parametrize("check_category_order", [True, False])
def test_categorical_equal_order_mismatch(check_category_order):
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 4], categories=[4, 3, 2, 1])
kwargs = dict(check_category_order=check_category_order)
if check_category_order:
msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(100\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[4, 3, 2, 1\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2, **kwargs)
else:
assert_categorical_equal(c1, c2, **kwargs)
def test_categorical_equal_categories_mismatch():
msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""
c1 = Categorical([1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 5])
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
def test_categorical_equal_codes_mismatch():
categories = [1, 2, 3, 4]
msg = """Categorical\\.codes are different
Categorical\\.codes values are different \\(50\\.0 %\\)
\\[left\\]: \\[0, 1, 3, 2\\]
\\[right\\]: \\[0, 1, 2, 3\\]"""
c1 = Categorical([1, 2, 4, 3], categories=categories)
c2 = Categorical([1, 2, 3, 4], categories=categories)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
def test_categorical_equal_ordered_mismatch():
data = [1, 2, 3, 4]
msg = """Categorical are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True"""
c1 = Categorical(data, ordered=False)
c2 = Categorical(data, ordered=True)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
@pytest.mark.parametrize("obj", ["index", "foo", "pandas"])
def test_categorical_equal_object_override(obj):
data = [1, 2, 3, 4]
msg = """{obj} are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True""".format(
obj=obj
)
c1 = Categorical(data, ordered=False)
c2 = Categorical(data, ordered=True)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2, obj=obj)
| bsd-3-clause |
karamarielynch/hfs-sim | hfs_sim.py | 1 | 19069 | import sys
from PyQt4 import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
import time
from scipy import signal
import matplotlib.pyplot as plt
import ast
from hfs_creation import *
import os.path
x = np.linspace(-20*10**3, 60*10**3, 50*10**3, endpoint = False)
c = 299792458.0
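# c is the speed of light in m/s; x spans the top plot's frequency axis in MHz
# relative to the reference-isotope centroid.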
class Example(QtGui.QMainWindow):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
widget = QtGui.QWidget()
self.setCentralWidget(widget)
self.grid = QtGui.QGridLayout(widget)
self.grid.setSpacing(10)
self.setWindowTitle('Hyperfine spectrum simulator')
self.IsotopeFilename = sys.argv[1]
HFS_params = np.loadtxt(self.IsotopeFilename, delimiter='\t', dtype='S')
self.IsotopeNames = []
names = HFS_params.T[0] #.decode("utf-8")
for i in range(len(names)):
newnames = names[i].decode("utf-8")
self.IsotopeNames.append(newnames)
Isotope_value = HFS_params[0]
self.I = float(Isotope_value[1])
self.J_lower = float(Isotope_value[2])
self.J_upper = float(Isotope_value[3])
self.CF = float(Isotope_value[4])
self.A_lower = float(Isotope_value[5])
self.A_upper = float(Isotope_value[6])
self.B_lower = float(Isotope_value[7])
self.B_upper = float(Isotope_value[8])
self.FWHM = float(Isotope_value[9])
self.Mass = float(Isotope_value[10])
self.RISLine = float(Isotope_value[11])
self.Mass_ref = float(Isotope_value[12])
self.ARatio = float(Isotope_value[13])
self.Line = float(Isotope_value[14])
self.Harmonic = float(Isotope_value[15])
Isotope = QtGui.QLabel('Isotope')
self.grid.addWidget(Isotope, 2, 0)
IsotopeEdit = QtGui.QLabel()
self.IsotopeList = QtGui.QComboBox(self)
self.IsotopeList.addItems(self.IsotopeNames)
self.grid.addWidget(self.IsotopeList, 2, 1)
self.IsotopeList.activated[str].connect(self.updateIsotope)
self.IsotopeList.activated[str].connect(self.updateTopPlot)
self.IsotopeList.activated[str].connect(self.updateBottomPlot)
self.IsotopeList.activated[str].connect(self.updateHFSPeaks)
Spin = QtGui.QLabel('Spin')
self.grid.addWidget(Spin, 3, 0)
self.SpinEdit = pg.SpinBox(value=self.I, dec=True, minStep=0.5)
self.SpinEdit.setRange(0, 12)
self.grid.addWidget(self.SpinEdit, 3, 1)
self.SpinEdit.valueChanged.connect(self.updateSpin)
self.SpinEdit.valueChanged.connect(self.updateTopPlot)
self.SpinEdit.valueChanged.connect(self.updateBottomPlot)
self.SpinEdit.valueChanged.connect(self.updateHFSPeaks)
Jl = QtGui.QLabel('Jl')
self.grid.addWidget(Jl, 3, 2)
self.JlEdit = pg.SpinBox(value=self.J_lower, dec=True, minStep=0.5)
self.JlEdit.setRange(0, 2.5)
self.grid.addWidget(self.JlEdit, 3, 3)
self.JlEdit.valueChanged.connect(self.updateJl)
self.JlEdit.valueChanged.connect(self.updateTopPlot)
self.JlEdit.valueChanged.connect(self.updateBottomPlot)
self.JlEdit.valueChanged.connect(self.updateHFSPeaks)
Ju = QtGui.QLabel('Ju')
self.grid.addWidget(Ju, 3, 4)
self.JuEdit = pg.SpinBox(value=self.J_upper, dec=True, minStep=0.5)
self.JuEdit.setRange(0, 2.5)
self.grid.addWidget(self.JuEdit, 3, 5)
self.JuEdit.valueChanged.connect(self.updateJu)
self.JuEdit.valueChanged.connect(self.updateTopPlot)
self.JuEdit.valueChanged.connect(self.updateBottomPlot)
self.JuEdit.valueChanged.connect(self.updateHFSPeaks)
Al = QtGui.QLabel('Al (MHz)')
self.grid.addWidget(Al, 4, 0)
self.AlEdit = pg.SpinBox(value=self.A_lower, dec=True, minStep=0.1)
self.AlEdit.setRange(-20000, 20000)
self.grid.addWidget(self.AlEdit, 4, 1)
self.AlEdit.valueChanged.connect(self.updateAl)
self.AlEdit.valueChanged.connect(self.updateTopPlot)
self.AlEdit.valueChanged.connect(self.updateBottomPlot)
self.AlEdit.valueChanged.connect(self.updateHFSPeaks)
Au = QtGui.QLabel('Au (MHz)')
self.grid.addWidget(Au, 4, 2)
self.AuEdit = pg.SpinBox(value=self.A_upper, dec=True, minStep=0.1)
self.AuEdit.setRange(-20000, 20000)
self.grid.addWidget(self.AuEdit, 4, 3)
self.AuEdit.valueChanged.connect(self.updateAu)
self.AuEdit.valueChanged.connect(self.updateTopPlot)
self.AuEdit.valueChanged.connect(self.updateBottomPlot)
self.AuEdit.valueChanged.connect(self.updateHFSPeaks)
Bl = QtGui.QLabel('Bl (MHz)')
self.grid.addWidget(Bl, 4, 4)
self.BlEdit = pg.SpinBox(value=self.B_lower, dec=True, minStep=0.1)
self.BlEdit.setRange(-20000, 20000)
self.grid.addWidget(self.BlEdit, 4, 5)
self.BlEdit.valueChanged.connect(self.updateBl)
self.BlEdit.valueChanged.connect(self.updateTopPlot)
self.BlEdit.valueChanged.connect(self.updateBottomPlot)
self.BlEdit.valueChanged.connect(self.updateHFSPeaks)
Bu = QtGui.QLabel('Bu (MHz)')
self.grid.addWidget(Bu, 4, 6)
self.BuEdit = pg.SpinBox(value=self.B_upper, dec=True, minStep=0.1)
self.BuEdit.setRange(-20000, 20000)
self.grid.addWidget(self.BuEdit, 4, 7)
self.BuEdit.valueChanged.connect(self.updateBu)
self.BuEdit.valueChanged.connect(self.updateTopPlot)
self.BuEdit.valueChanged.connect(self.updateBottomPlot)
self.BuEdit.valueChanged.connect(self.updateHFSPeaks)
CF = QtGui.QLabel('CF (MHz)')
self.grid.addWidget(CF, 5, 0)
self.CFEdit = pg.SpinBox(value=self.CF, dec=True, minStep=1)
self.CFEdit.setRange(-50000, 50000)
self.grid.addWidget(self.CFEdit, 5, 1)
self.CFEdit.valueChanged.connect(self.updateCF)
self.CFEdit.valueChanged.connect(self.updateTopPlot)
self.CFEdit.valueChanged.connect(self.updateBottomPlot)
self.CFEdit.valueChanged.connect(self.updateHFSPeaks)
FWHM = QtGui.QLabel('FWHM (MHz)')
self.grid.addWidget(FWHM, 5, 2)
self.FWHMEdit = pg.SpinBox(value=self.FWHM, dec=True, minStep=1)
self.FWHMEdit.setRange(0, 3000)
self.grid.addWidget(self.FWHMEdit, 5, 3)
self.FWHMEdit.valueChanged.connect(self.updateFWHM)
self.FWHMEdit.valueChanged.connect(self.updateTopPlot)
self.FWHMEdit.valueChanged.connect(self.updateBottomPlot)
self.FWHMEdit.valueChanged.connect(self.updateHFSPeaks)
ARatio = QtGui.QLabel('Au/Al Ratio')
self.grid.addWidget(ARatio, 5, 4)
self.ARatioEnable = QtGui.QCheckBox('', self)
self.grid.addWidget(self.ARatioEnable, 5, 5)
self.ARatioEnable.stateChanged.connect(self.enableARatio)
self.ARatioEnable.stateChanged.connect(self.updateTopPlot)
self.ARatioEnable.stateChanged.connect(self.updateBottomPlot)
self.ARatioEnable.stateChanged.connect(self.updateHFSPeaks)
ISCOOL = QtGui.QLabel('ISCOOL (V)')
self.grid.addWidget(ISCOOL, 8, 0)
self.ISCOOLEdit = pg.SpinBox(value=30000., dec=True, minStep=0.01)
self.ISCOOLEdit.setRange(0, 60000)
self.grid.addWidget(self.ISCOOLEdit, 8, 1)
self.ISCOOLEdit.valueChanged.connect(self.updateISCOOL)
self.ISCOOLEdit.valueChanged.connect(self.updateBottomPlot)
self.ISCOOLEdit.valueChanged.connect(self.updateHFSPeaks)
centroid = self.RISLine/self.Harmonic
alpha = self.ISCOOLEdit.value()/(self.Mass_ref*931.494061*10**6)
centroid_doppler = centroid/( 1 + alpha - sqrt(2*alpha + alpha*alpha))
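        # alpha is the beam kinetic energy (ISCOOL voltage in eV, assuming a
        # singly charged ion) over the rest energy; 1 + alpha - sqrt(2*alpha +
        # alpha**2) equals gamma*(1 - beta), i.e. the relativistic Doppler
        # factor for collinear geometry.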
offset = centroid_doppler
minrange = round(centroid_doppler,1) - 2.0 # cm-1
maxrange = round(centroid_doppler,1) + 2.0 # cm-1
calculateButton = QtGui.QPushButton('Reset Range', self)
self.grid.addWidget(calculateButton, 9, 0)
calculateButton.clicked.connect(self.calculateRanges)
calculateButton.clicked.connect(self.updateRange)
calculateButton.clicked.connect(self.updateBottomPlot)
#calculateButton.clicked.connect(self.updateHFSPeaks)
calculateButton.setToolTip('Recalculate wavenumber range')
MinRange = QtGui.QLabel('From (cm-1)')
self.grid.addWidget(MinRange, 8, 2)
self.MinRangeEdit = QtGui.QDoubleSpinBox(decimals=3)
self.MinRangeEdit.setSingleStep(0.001)
self.MinRangeEdit.setRange(0, 100000)
self.MinRangeEdit.setValue(minrange)
self.grid.addWidget(self.MinRangeEdit, 8, 3)
self.MinRangeEdit.valueChanged.connect(self.updateRange)
self.MinRangeEdit.valueChanged.connect(self.updateBottomPlot)
MaxRange = QtGui.QLabel('To (cm-1)')
self.grid.addWidget(MaxRange, 8, 4)
self.MaxRangeEdit = QtGui.QDoubleSpinBox(decimals=3)
self.MaxRangeEdit.setSingleStep(0.001)
self.MaxRangeEdit.setRange(0, 100000)
self.MaxRangeEdit.setValue(maxrange)
self.grid.addWidget(self.MaxRangeEdit, 8, 5)
self.MaxRangeEdit.valueChanged.connect(self.updateRange)
self.MaxRangeEdit.valueChanged.connect(self.updateBottomPlot)
Offset = QtGui.QLabel('Offset (cm-1)')
self.grid.addWidget(Offset, 8, 6)
self.OffsetEdit = QtGui.QDoubleSpinBox(decimals=3)
self.OffsetEdit.setSingleStep(0.001)
self.OffsetEdit.setRange(0, 100000)
self.OffsetEdit.setValue(offset)
self.grid.addWidget(self.OffsetEdit, 8, 7)
self.OffsetEdit.valueChanged.connect(self.updateOffset)
self.OffsetEdit.valueChanged.connect(self.updateBottomPlot)
Peaks = QtGui.QLabel('Peaks (cm-1)')
Intensity = QtGui.QLabel('Intensities')
self.grid.addWidget(Peaks, 9, 1)
self.grid.addWidget(Intensity, 10, 1)
self.PeaksEdits = []
self.IntensityEdits = []
for i in range(len(HF_function(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper)[0])):
PeakValue_freq = HF_function(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper)[0][i]
alpha = self.ISCOOLEdit.value()/(self.Mass*931.494061*10**6)
PeakValue_dopp = PeakValue_freq/( 1 + alpha - sqrt(2*alpha + alpha*alpha))
PeakValue_wave = PeakValue_dopp*10**4/(self.Harmonic*c) + offset
self.PeaksEdit = QtGui.QLabel(str(round(PeakValue_wave, 3)))
self.grid.addWidget(self.PeaksEdit, 9, i+2)
self.PeaksEdits.append(self.PeaksEdit)
IntensityValue = HF_function(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper)[1][i]
self.IntensityEdit = QtGui.QLabel(str(round(IntensityValue, 1)))
self.grid.addWidget(self.IntensityEdit, 10, i+2)
self.IntensityEdits.append(self.IntensityEdit)
self.plotWidget = pg.PlotWidget()
self.grid.addWidget(self.plotWidget, 0, 0, 2, 11)
self.plotWidget.setLabel('bottom', "Frequency relative to reference isotope", units='Hz', color='#0092ff')
self.plotWidget.setTitle(str(self.Line)+" nm transition", color='#0092ff')
self.plotWidget.showAxis('left', False)
self.plotWidget.plot(x*10**6, HFS(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper, self.FWHM, 10, 0, x), pen=tuple([0,146,255]))
self.wavenumber_range = np.linspace(minrange, maxrange, 10**5)
self.centroid = (self.RISLine*c)/10**4 # RISLine taken from NIST and corrected for reference isotope (units of cm-1)
self.freq_range_lab = (self.Harmonic*self.wavenumber_range*c)/10**4
self.freq_range = Doppler_correction(self.freq_range_lab, self.Mass, self.ISCOOLEdit.value())
self.plotWidget2 = pg.PlotWidget()
self.grid.addWidget(self.plotWidget2, 6, 0, 2, 11)
self.plotWidget2.setLabel('bottom', "Wavenumber offset by "+str(self.OffsetEdit.value()), units='cm-1', color='#00ffff')
self.plotWidget2.setTitle(str(self.Line)+" nm transition", color='#00ffff')
self.plotWidget2.showAxis('left', False)
self.plotWidget2.plot(self.wavenumber_range-self.OffsetEdit.value(), HFS(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper, self.FWHM, 10, 0, (self.freq_range-self.centroid) ), pen=tuple([0,255,255]))
self.old_isotope = self.IsotopeList.currentText()
self.show()
def updateTopPlot(self):
self.plotWidget.plot(clear=True)
self.plotWidget.setTitle(str(self.Line)+" nm transition", color='#0092ff')
try:
self.plotWidget.plot(x*10**6, HFS(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper, self.FWHM, 10, 0, x ), pen=tuple([0,146,255]))
except ValueError:
self.showInfo()
def showInfo(self):
msg = QtGui.QMessageBox()
msg.setIcon(QtGui.QMessageBox.Warning)
msg.setWindowTitle('Error for Jl -> Ju')
msg.setText("Please choose a valid transition")
msg.exec_()
def updateBottomPlot(self):
self.centroid = (self.RISLine*c)/10**4
self.freq_range_lab = (self.Harmonic*self.wavenumber_range*c)/10**4
freq_range = Doppler_correction(self.freq_range_lab, self.Mass, self.ISCOOLEdit.value())
offset = self.OffsetEdit.value()
self.plotWidget2.plot(clear=True)
self.plotWidget2.setLabel('bottom', "Wavenumber offset by "+str(self.OffsetEdit.value()), units='cm-1', color='#00ffff')
self.plotWidget2.setTitle(str(self.Line)+" nm transition", color='#00ffff')
try:
self.plotWidget2.plot(self.wavenumber_range-offset, HFS(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper, self.FWHM, 10, 0, (freq_range-self.centroid) ), pen=tuple([0,255,255]))
except ValueError:
pass
def updateIsotope(self):
isotope = self.sender().currentText()
HFS_params = np.loadtxt(self.IsotopeFilename, delimiter='\t', dtype='S')
for i in range(len(HFS_params)):
if HFS_params[i][0].decode("utf-8") == isotope:
Isotope_value = HFS_params[i]
self.I = float(Isotope_value[1])
self.J_lower = float(Isotope_value[2])
self.J_upper = float(Isotope_value[3])
self.CF = float(Isotope_value[4])
self.A_lower = float(Isotope_value[5])
self.A_upper = float(Isotope_value[6])
self.B_lower = float(Isotope_value[7])
self.B_upper = float(Isotope_value[8])
self.FWHM = float(Isotope_value[9])
self.Mass = float(Isotope_value[10])
self.RISLine = float(Isotope_value[11])
self.Mass_ref = float(Isotope_value[12])
self.ARatio = float(Isotope_value[13])
self.Line = float(Isotope_value[14])
self.Harmonic = float(Isotope_value[15])
self.SpinEdit.setValue(self.I)
self.JlEdit.setValue(self.J_lower)
self.JuEdit.setValue(self.J_upper)
self.AlEdit.setValue(self.A_lower)
if self.ARatioEnable.isChecked():
self.AuEdit.setValue(self.A_lower*self.ARatio)
else:
self.AuEdit.setValue(self.A_upper)
self.BlEdit.setValue(self.B_lower)
self.BuEdit.setValue(self.B_upper)
self.FWHMEdit.setValue(self.FWHM)
self.CFEdit.setValue(self.CF)
if self.old_isotope.split('-')[1] != isotope.split('-')[1]:
centroid = self.RISLine/self.Harmonic
alpha = self.ISCOOLEdit.value()/(self.Mass_ref*931.494061*10**6)
centroid_doppler = centroid/( 1 + alpha - sqrt(2*alpha + alpha*alpha))
offset = centroid_doppler
minrange = round(centroid_doppler,1) - 2.0 # cm-1
maxrange = round(centroid_doppler,1) + 2.0 # cm-1
self.wavenumber_range = np.linspace(minrange, maxrange, 10**5)
self.freq_range_lab = (self.Harmonic*self.wavenumber_range*c)/10**4
self.freq_range = Doppler_correction(self.freq_range_lab, self.Mass, self.ISCOOLEdit.value())
self.MinRangeEdit.setValue(minrange)
self.MaxRangeEdit.setValue(maxrange)
self.OffsetEdit.setValue(offset)
else:
pass
self.old_isotope = self.IsotopeList.currentText()
def updateHFSPeaks(self):
for i in range(len(self.PeaksEdits)):
self.PeaksEdits[i].setText('')
self.IntensityEdits[i].setText('')
PeakValues = []
IntensityValues = []
for i in range(len(HF_function(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper)[0])):
PeakValue_freq = HF_function(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper)[0][i]
IntensityValue = HF_function(self.I, self.J_lower, self.J_upper, self.CF, self.A_lower, self.A_upper, self.B_lower, self.B_upper)[1][i]
if IntensityValue > 0:
centroid = self.RISLine/self.Harmonic
alpha = self.ISCOOLEdit.value()/(self.Mass*931.494061*10**6)
centroid_doppler = centroid/( 1 + alpha - sqrt(2*alpha + alpha*alpha))
offset = centroid_doppler
PeakValue_wave = PeakValue_freq*10**4/(self.Harmonic*c) + offset
PeakValues.append(PeakValue_wave)
IntensityValues.append(IntensityValue)
PeakValues, IntensityValues = np.array(PeakValues), np.array(IntensityValues)
PeakValues_sort = sort(PeakValues)
inds = PeakValues.argsort()
IntensityValues_sort = IntensityValues[inds]
self.PeaksEdits = []
self.IntensityEdits = []
for i in range(len(PeakValues_sort)):
self.PeaksEdit = QtGui.QLabel(str(round(PeakValues_sort[i], 3)))
self.grid.addWidget(self.PeaksEdit, 9, i+2)
self.PeaksEdits.append(self.PeaksEdit)
self.IntensityEdit = QtGui.QLabel(str(round(IntensityValues_sort[i], 1)))
self.grid.addWidget(self.IntensityEdit, 10, i+2)
self.IntensityEdits.append(self.IntensityEdit)
with open("hfs_peaks.txt", 'w') as f:
f.write(str(self.IsotopeList.currentText().split('-')[0])+'\n')
f.write(str(self.ISCOOLEdit.value())+'\n')
for p in PeakValues_sort:
f.write(str(p)+"\n")
def updateSpin(self):
self.I = self.sender().value()
def updateJl(self):
self.J_lower = self.sender().value()
def updateJu(self):
self.J_upper = self.sender().value()
def updateAl(self):
self.A_lower = self.sender().value()
if self.ARatioEnable.isChecked():
self.A_upper = self.A_lower*self.ARatio
self.AuEdit.setValue(self.A_upper)
def updateAu(self):
self.A_upper = self.sender().value()
def updateBl(self):
self.B_lower = self.sender().value()
def updateBu(self):
self.B_upper = self.sender().value()
def updateFWHM(self):
self.FWHM = self.sender().value()
def updateCF(self):
self.CF = self.sender().value()
def updateISCOOL(self):
self.ISCOOL = self.sender().value()
def updateOffset(self):
self.Offset = self.sender().value()
def updateRange(self):
MinRange = self.MinRangeEdit.value()
MaxRange = self.MaxRangeEdit.value()
self.wavenumber_range = np.linspace(MinRange, MaxRange, 10**5)
def calculateRanges(self):
centroid = self.RISLine/self.Harmonic
alpha = self.ISCOOLEdit.value()/(self.Mass_ref*931.494061*10**6)
centroid_doppler = centroid/( 1 + alpha - sqrt(2*alpha + alpha*alpha))
offset = centroid_doppler
minrange = round(centroid_doppler,1) - 2.0 # cm-1
maxrange = round(centroid_doppler,1) + 2.0 # cm-1
self.wavenumber_range = np.linspace(minrange, maxrange, 10**5)
self.freq_range_lab = (self.Harmonic*self.wavenumber_range*c)/10**4
self.freq_range = Doppler_correction(self.freq_range_lab, self.Mass, self.ISCOOLEdit.value())
self.MinRangeEdit.setValue(minrange)
self.MaxRangeEdit.setValue(maxrange)
self.OffsetEdit.setValue(offset)
def enableARatio(self, state):
if state == QtCore.Qt.Checked:
self.A_upper = self.A_lower*self.ARatio
self.AuEdit.setValue(self.A_upper)
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| mit |
montoyjh/pymatgen | pymatgen/apps/battery/plotter.py | 5 | 3385 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides plotting capabilities for battery related applications.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 12, 2012"
from collections import OrderedDict
from pymatgen.util.plotting import pretty_plot
class VoltageProfilePlotter:
"""
A plotter to make voltage profile plots for batteries.
Args:
xaxis: The quantity to use as the xaxis. Can be either capacity (the
default), or the frac_x.
"""
def __init__(self, xaxis="capacity"):
self._electrodes = OrderedDict()
self.xaxis = xaxis
def add_electrode(self, electrode, label=None):
"""
Add an electrode to the plot.
Args:
electrode: An electrode. All electrodes satisfying the
AbstractElectrode interface should work.
label: A label for the electrode. If None, defaults to a counting
system, i.e. 'Electrode 1', 'Electrode 2', ...
"""
if not label:
label = "Electrode {}".format(len(self._electrodes) + 1)
self._electrodes[label] = electrode
def get_plot_data(self, electrode):
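        """Return (x, y) lists tracing the voltage profile of ``electrode``:
        x is the cumulative capacity (mAh/g) or the normalized x-fraction,
        depending on ``self.xaxis``, and y is the voltage of each step
        (duplicated to draw the plateaus, with a final drop to 0 V)."""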
x = []
y = []
cap = 0
most_discharged = electrode[-1].frac_discharge
norm = most_discharged / (1 - most_discharged)
for vpair in electrode:
if self.xaxis == "capacity":
x.append(cap)
cap += vpair.mAh / electrode.normalization_mass
x.append(cap)
else:
x.append(vpair.frac_charge / (1 - vpair.frac_charge) / norm)
x.append(vpair.frac_discharge / (1 - vpair.frac_discharge)
/ norm)
y.extend([vpair.voltage] * 2)
x.append(x[-1])
y.append(0)
return x, y
def get_plot(self, width=8, height=8):
"""
Returns a plot object.
Args:
width: Width of the plot. Defaults to 8 in.
            height: Height of the plot. Defaults to 8 in.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(width, height)
for label, electrode in self._electrodes.items():
(x, y) = self.get_plot_data(electrode)
plt.plot(x, y, '-', linewidth=2, label=label)
plt.legend()
if self.xaxis == "capacity":
plt.xlabel('Capacity (mAh/g)')
else:
plt.xlabel('Fraction')
plt.ylabel('Voltage (V)')
plt.tight_layout()
return plt
def show(self, width=8, height=6):
"""
Show the voltage profile plot.
Args:
width: Width of the plot. Defaults to 8 in.
height: Height of the plot. Defaults to 6 in.
"""
self.get_plot(width, height).show()
def save(self, filename, image_format="eps", width=8, height=6):
"""
Save the plot to an image file.
Args:
filename: Filename to save to.
image_format: Format to save to. Defaults to eps.
"""
self.get_plot(width, height).savefig(filename, format=image_format)
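# Minimal usage sketch, assuming `electrode` is any object satisfying the
# AbstractElectrode interface (e.g. an InsertionElectrode built elsewhere):
#
#     plotter = VoltageProfilePlotter(xaxis="capacity")
#     plotter.add_electrode(electrode, label="LiFePO4")
#     plotter.show()                      # or plotter.save("profile.eps")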
| mit |
joroKr21/incubator-zeppelin | python/src/main/resources/python/zeppelin_python.py | 19 | 6945 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
import ast
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PythonCompletion:
def __init__(self, interpreter, userNameSpace):
self.interpreter = interpreter
self.userNameSpace = userNameSpace
def getObjectCompletion(self, text_value):
completions = [completion for completion in list(self.userNameSpace.keys()) if completion.startswith(text_value)]
builtinCompletions = [completion for completion in dir(__builtins__) if completion.startswith(text_value)]
return completions + builtinCompletions
def getMethodCompletion(self, objName, methodName):
execResult = locals()
try:
exec("{} = dir({})".format("objectDefList", objName), _zcUserQueryNameSpace, execResult)
except:
self.interpreter.logPythonOutput("Fail to run dir on " + objName)
self.interpreter.logPythonOutput(traceback.format_exc())
return None
else:
objectDefList = execResult['objectDefList']
return [completion for completion in execResult['objectDefList'] if completion.startswith(methodName)]
def getCompletion(self, text_value):
    if text_value is None:
return None
dotPos = text_value.find(".")
if dotPos == -1:
objName = text_value
completionList = self.getObjectCompletion(objName)
else:
objName = text_value[:dotPos]
methodName = text_value[dotPos + 1:]
completionList = self.getMethodCompletion(objName, methodName)
if completionList is None or len(completionList) <= 0:
self.interpreter.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreter.setStatementsFinished(result, False)
host = sys.argv[1]
port = int(sys.argv[2])
if "PY4J_GATEWAY_SECRET" in os.environ:
from py4j.java_gateway import GatewayParameters
gateway_secret = os.environ["PY4J_GATEWAY_SECRET"]
gateway = JavaGateway(gateway_parameters=GatewayParameters(
address=host, port=port, auth_token=gateway_secret, auto_convert=True))
else:
gateway = JavaGateway(GatewayClient(address=host, port=port), auto_convert=True)
intp = gateway.entry_point
_zcUserQueryNameSpace = {}
completion = PythonCompletion(intp, _zcUserQueryNameSpace)
_zcUserQueryNameSpace["__zeppelin_completion__"] = completion
_zcUserQueryNameSpace["gateway"] = gateway
from zeppelin_context import PyZeppelinContext
if intp.getZeppelinContext():
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
intp.onPythonScriptInitialized(os.getpid())
# redirect stdout/stderr to java side so that PythonInterpreter can capture the python execution result
output = Logger()
sys.stdout = output
sys.stderr = output
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
isForCompletion = req.isForCompletion()
# Get post-execute hooks
try:
if req.isCallHooks():
global_hook = intp.getHook('post_exec_dev')
else:
global_hook = None
except:
global_hook = None
try:
if req.isCallHooks():
user_hook = __zeppelin__.getHook('post_exec')
else:
user_hook = None
except:
user_hook = None
nhooks = 0
if not isForCompletion:
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]] if len(code.body) > nhooks else [])
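      # The last non-hook statement is compiled in 'single' mode below so that its value
      # is echoed to the interpreter output (REPL-style); the earlier statements run in
      # plain 'exec' mode.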
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
if not isForCompletion:
# only call it when it is not for code completion. code completion will call it in
# PythonCompletion.getCompletion
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
if not isForCompletion:
# extract which line incur error from error message. e.g.
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# ZeroDivisionError: integer division or modulo by zero
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
chrsrds/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 96974 | import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
from functools import partial
from inspect import signature
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import joblib
from . import IS_PYPY
from .testing import assert_raises, _get_args
from .testing import assert_raises_regex
from .testing import assert_raise_message
from .testing import assert_array_equal
from .testing import assert_array_almost_equal
from .testing import assert_allclose
from .testing import assert_allclose_dense_sparse
from .testing import assert_warns_message
from .testing import set_random_state
from .testing import SkipTest
from .testing import ignore_warnings
from .testing import assert_dict_equal
from .testing import create_memmap_backed_data
from . import is_scalar_nan
from ..discriminant_analysis import LinearDiscriminantAnalysis
from ..linear_model import Ridge
from ..base import (clone, ClusterMixin, is_classifier, is_regressor,
_DEFAULT_TAGS, RegressorMixin, is_outlier_detector)
from ..metrics import accuracy_score, adjusted_rand_score, f1_score
from ..random_projection import BaseRandomProjection
from ..feature_selection import SelectKBest
from ..pipeline import make_pipeline
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..exceptions import SkipTestWarning
from ..model_selection import train_test_split
from ..model_selection import ShuffleSplit
from ..model_selection._validation import _safe_split
from ..metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances)
from . import shuffle
from .validation import has_fit_parameter, _num_samples
from ..preprocessing import StandardScaler
from ..datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _safe_tags(estimator, key=None):
# if estimator doesn't have _get_tags, use _DEFAULT_TAGS
# if estimator has tags but not key, use _DEFAULT_TAGS[key]
if hasattr(estimator, "_get_tags"):
if key is not None:
return estimator._get_tags().get(key, _DEFAULT_TAGS[key])
tags = estimator._get_tags()
return {key: tags.get(key, _DEFAULT_TAGS[key])
for key in _DEFAULT_TAGS.keys()}
if key is not None:
return _DEFAULT_TAGS[key]
return _DEFAULT_TAGS
def _yield_checks(name, estimator):
tags = _safe_tags(estimator)
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_sample_weights_invariance
yield check_estimators_fit_returns_self
yield partial(check_estimators_fit_returns_self, readonly_memmap=True)
    # Check that all estimators yield informative messages when
# trained on empty datasets
if not tags["no_validation"]:
yield check_complex_data
yield check_dtype_object
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION:
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if not tags["allow_nan"] and not tags["no_validation"]:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
yield check_estimators_overwrite_params
if hasattr(estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, classifier):
tags = _safe_tags(classifier)
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield partial(check_classifiers_train, readonly_memmap=True)
yield check_classifiers_regression_target
if not tags["no_validation"]:
yield check_supervised_y_no_nan
yield check_supervised_y_2d
if tags["requires_fit"]:
yield check_estimators_unfitted
if 'class_weight' in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.full(10, np.inf)
y = enforce_estimator_tags_y(estimator, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
estimator.fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised error as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, regressor):
tags = _safe_tags(regressor)
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield partial(check_regressors_train, readonly_memmap=True)
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
if not tags["no_validation"]:
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if tags["requires_fit"]:
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not _safe_tags(transformer, "no_validation"):
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, "stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield partial(check_clustering, readonly_memmap=True)
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(name, estimator):
# checks for outlier detectors that have a fit_predict method
if hasattr(estimator, 'fit_predict'):
yield check_outliers_fit_predict
# checks for estimators that can be used on a test set
if hasattr(estimator, 'predict'):
yield check_outliers_train
yield partial(check_outliers_train, readonly_memmap=True)
# test outlier detectors can handle non-array data
yield check_classifier_data_not_an_array
# test if NotFittedError is raised
if _safe_tags(estimator, "requires_fit"):
yield check_estimators_unfitted
def _yield_all_checks(name, estimator):
tags = _safe_tags(estimator)
if "2darray" not in tags["X_types"]:
warnings.warn("Can't test estimator {} which requires input "
" of type {}".format(name, tags["X_types"]),
SkipTestWarning)
return
if tags["_skip_test"]:
warnings.warn("Explicit SKIP via _skip_test tag for estimator "
"{}.".format(name),
SkipTestWarning)
return
for check in _yield_checks(name, estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(name, estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(name, estimator):
yield check
if hasattr(estimator, 'transform'):
for check in _yield_transformer_checks(name, estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(name, estimator):
yield check
if is_outlier_detector(estimator):
for check in _yield_outliers_checks(name, estimator):
yield check
yield check_fit2d_predict1d
yield check_methods_subset_invariance
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d
yield check_get_params_invariance
yield check_set_params
yield check_dict_unchanged
yield check_dont_overwrite_parameters
yield check_fit_idempotent
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
    This function will run an extensive test-suite for input validation,
    shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
    Classes currently have some additional tests related to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
    Estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
"""
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
estimator = Estimator()
check_parameters_default_constructible(name, Estimator)
check_no_attributes_set_in_init(name, estimator)
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as exception:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(str(exception), SkipTestWarning)
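# Minimal usage sketch (names below are ordinary scikit-learn estimators, shown
# for illustration): passing the class also runs the construction checks, while
# passing an instance only exercises that particular configuration.
#
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)         # class: includes constructor checks
#     check_estimator(LogisticRegression(C=0.1))  # instance: checks this configuration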
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
name = estimator.__class__.__name__
if ("n_iter" in params and name != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if hasattr(estimator, "n_components"):
estimator.n_components = 2
if name == 'TruncatedSVD':
# TruncatedSVD doesn't run with n_components = n_features
# This is ugly :-/
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = min(estimator.n_clusters, 2)
if hasattr(estimator, "n_best"):
estimator.n_best = 1
if name == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if name == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
if name in ('HistGradientBoostingClassifier',
'HistGradientBoostingRegressor'):
# The default min_samples_leaf (20) isn't appropriate for small
# datasets (only very shallow trees are built) that the checks use.
estimator.set_params(min_samples_leaf=5)
# Speed-up by reducing the number of CV or splits for CV estimators
loo_cv = ['RidgeCV']
if name not in loo_cv and hasattr(estimator, 'cv'):
estimator.set_params(cv=3)
if hasattr(estimator, 'n_splits'):
estimator.set_params(n_splits=3)
if name == 'OneHotEncoder':
estimator.set_params(handle_unknown='ignore')
class NotAnArray:
"""An object that is convertible to an array
Parameters
----------
data : array_like
The data.
"""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_pairwise(estimator):
"""Returns True if estimator has a _pairwise attribute set to True.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if _pairwise is set to True and False otherwise.
"""
return bool(getattr(estimator, "_pairwise", False))
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
        True if the estimator's `metric` parameter is set to 'precomputed',
        False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == 'precomputed')
def pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):
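    # Give pairwise estimators the square matrix they expect: a Euclidean distance
    # matrix when metric='precomputed', a kernel matrix (linear by default) when the
    # estimator sets _pairwise, and the raw feature matrix otherwise.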
if _is_pairwise_metric(estimator):
return pairwise_distances(X, metric='euclidean')
if _is_pairwise(estimator):
return kernel(X, X)
return X
def _generate_sparse_matrix(X_csr):
"""Generate sparse matrices with {32,64}bit indices of diverse format
Parameters
----------
X_csr: CSR Matrix
Input matrix in CSR format
Returns
-------
out: iter(Matrices)
In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo',
'coo_64', 'csc_64', 'csr_64']
"""
assert X_csr.format == 'csr'
yield 'csr', X_csr.copy()
for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']:
yield sparse_format, X_csr.asformat(sparse_format)
    # Generate a large-indices matrix only if it is supported by scipy
X_coo = X_csr.asformat('coo')
X_coo.row = X_coo.row.astype('int64')
X_coo.col = X_coo.col.astype('int64')
yield "coo_64", X_coo
for sparse_format in ['csc', 'csr']:
X = X_csr.asformat(sparse_format)
X.indices = X.indices.astype('int64')
X.indptr = X.indptr.astype('int64')
yield sparse_format + "_64", X
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = pairwise_estimator_convert_X(X, estimator_orig)
X_csr = sparse.csr_matrix(X)
tags = _safe_tags(estimator_orig)
if tags['binary_only']:
y = (2 * rng.rand(40)).astype(np.int)
else:
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
for matrix_format, X in _generate_sparse_matrix(X_csr):
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator = clone(estimator_orig)
if name in ['Scaler', 'StandardScaler']:
estimator.set_params(with_mean=False)
# fit and predict
try:
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
if tags['multioutput_only']:
assert pred.shape == (X.shape[0], 1)
else:
assert pred.shape == (X.shape[0],)
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
if tags['binary_only']:
expected_probs_shape = (X.shape[0], 2)
else:
expected_probs_shape = (X.shape[0], 4)
assert probs.shape == expected_probs_shape
except (TypeError, ValueError) as e:
if 'sparse' not in repr(e).lower():
if "64" in matrix_format:
msg = ("Estimator %s doesn't seem to support %s matrix, "
"and is not failing gracefully, e.g. by using "
"check_array(X, accept_large_sparse=False)")
raise AssertionError(msg % (name, matrix_format))
else:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not"
" the case." % name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4]])
X = pd.DataFrame(pairwise_estimator_convert_X(X, estimator_orig))
y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2])
weights = pd.Series([1] * 8)
if _safe_tags(estimator, "multioutput_only"):
y = pd.DataFrame(y)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
if has_fit_parameter(estimator_orig, "sample_weight"):
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),
estimator_orig)
if _safe_tags(estimator, 'binary_only'):
y = np.arange(10) % 2
else:
y = np.arange(10) % 3
y = enforce_estimator_tags_y(estimator, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_invariance(name, estimator_orig):
# check that the estimators yield same results for
# unit weights and no weights
if (has_fit_parameter(estimator_orig, "sample_weight") and
not (hasattr(estimator_orig, "_pairwise")
and estimator_orig._pairwise)):
# We skip pairwise because the data is not pairwise
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
[2, 1], [2, 1], [2, 1], [2, 1],
[3, 3], [3, 3], [3, 3], [3, 3],
[4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.dtype('float'))
y = np.array([1, 1, 1, 1, 2, 2, 2, 2,
1, 1, 1, 1, 2, 2, 2, 2], dtype=np.dtype('int'))
y = enforce_estimator_tags_y(estimator1, y)
estimator1.fit(X, y=y, sample_weight=np.ones(shape=len(y)))
estimator2.fit(X, y=y, sample_weight=None)
for method in ["predict", "transform"]:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X)
X_pred2 = getattr(estimator2, method)(X)
if sparse.issparse(X_pred1):
X_pred1 = X_pred1.toarray()
X_pred2 = X_pred2.toarray()
assert_allclose(X_pred1, X_pred2,
err_msg="For %s sample_weight=None is not"
" equivalent to sample_weight=ones"
% name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)
X = X.astype(object)
tags = _safe_tags(estimator_orig)
if tags['binary_only']:
y = (X[:, 0] * 2).astype(np.int)
else:
y = (X[:, 0] * 4).astype(np.int)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
if 'string' not in tags['X_types']:
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string.* number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
else:
# Estimators supporting string will not call np.asarray to convert the
# data to numeric and therefore, the error will not be raised.
# Checking for each element dtype in the input array will be costly.
# Refer to #11401 for full discussion.
estimator.fit(X, y)
def check_complex_data(name, estimator_orig):
# check that estimators raise an exception on providing complex data
X = np.random.sample(10) + 1j * np.random.sample(10)
X = X.reshape(-1, 1)
y = np.random.sample(10) + 1j * np.random.sample(10)
estimator = clone(estimator_orig)
assert_raises_regex(ValueError, "Complex data not supported",
estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
# this estimator raises
# ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required.
# error
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
X = pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert estimator.__dict__ == dict_before, (
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
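    # "Public" means no leading underscore (not private) and no trailing underscore
    # (not an attribute estimated by fit), i.e. a constructor parameter.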
return not (attr.startswith('_') or attr.endswith('_'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int)
if _safe_tags(estimator, 'binary_only'):
y[y == 2] = 1
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert not attrs_added_by_fit, (
'Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added'
% ', '.join(attrs_added_by_fit))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert not attrs_changed_by_fit, (
'Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed'
% ', '.join(attrs_changed_by_fit))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int)
tags = _safe_tags(estimator_orig)
if tags['binary_only']:
y[y == 2] = 1
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
if tags["no_validation"]:
# FIXME this is a bit loose
return
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
def _apply_on_subsets(func, X):
# apply function on the whole set and on mini batches
result_full = func(X)
n_features = X.shape[1]
result_by_batch = [func(batch.reshape(1, n_features))
for batch in X]
# func can output tuple (e.g. score_samples)
if type(result_full) == tuple:
result_full = result_full[0]
result_by_batch = list(map(lambda x: x[0], result_by_batch))
if sparse.issparse(result_full):
result_full = result_full.A
result_by_batch = [x.A for x in result_by_batch]
return np.ravel(result_full), np.ravel(result_by_batch)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_methods_subset_invariance(name, estimator_orig):
# check that method gives invariant results if applied
# on mini batches or the whole set
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int)
if _safe_tags(estimator_orig, 'binary_only'):
y[y == 2] = 1
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"score_samples", "predict_proba"]:
msg = ("{method} of {name} is not invariant when applied "
"to a subset.").format(method=method, name=name)
# TODO remove cases when corrected
if (name, method) in [('NuSVC', 'decision_function'),
('SparsePCA', 'transform'),
('MiniBatchSparsePCA', 'transform'),
('DummyClassifier', 'predict'),
('BernoulliRBM', 'score_samples')]:
raise SkipTest(msg)
if hasattr(estimator, method):
result_full, result_by_batch = _apply_on_subsets(
getattr(estimator, method), X)
assert_allclose(result_full, result_by_batch,
atol=1e-7, err_msg=msg)
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
# Check that fitting a 2d array with only one sample either works or
# returns an informative message. The error message should either mention
# the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
# min_cluster_size cannot be less than the data size for OPTICS.
if name == 'OPTICS':
estimator.set_params(min_samples=1)
msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample",
"1 class", "one class"]
try:
estimator.fit(X, y)
except ValueError as e:
if all(msg not in repr(e) for msg in msgs):
raise e
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
# check fitting a 2d array with only 1 feature either works or returns
# informative message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
X = pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == 'RandomizedLogisticRegression':
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == 'RANSACRegressor':
estimator.residual_threshold = 0.5
y = enforce_estimator_tags_y(estimator, y)
set_random_state(estimator, 1)
msgs = ["1 feature(s)", "n_features = 1", "n_features=1"]
try:
estimator.fit(X, y)
except ValueError as e:
if all(msg not in repr(e) for msg in msgs):
raise e
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
estimator = clone(estimator_orig)
tags = _safe_tags(estimator)
if tags["no_validation"]:
# FIXME this is a bit loose
return
y = enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
assert_raises(ValueError, estimator.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_general(name, transformer, readonly_memmap=False):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
_check_transformer(name, transformer, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
# try the same with some list
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformers_unfitted(name, transformer):
X, y = _boston_subset()
transformer = clone(transformer)
with assert_raises((AttributeError, ValueError), msg="The unfitted "
"transformer {} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform.".format(name)):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
# check for consistent n_samples
assert X_pred.shape[0] == n_samples
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if _safe_tags(transformer_orig, 'non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred, x_pred2, atol=1e-2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer)
assert_allclose_dense_sparse(
x_pred, x_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
else:
assert_allclose_dense_sparse(
X_pred, X_pred2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer, atol=1e-2)
assert_allclose_dense_sparse(
X_pred, X_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
# raises error on malformed input for transform
if hasattr(X, 'T') and not _safe_tags(transformer, "stateless"):
# If it's not an array, it does not have a 'T' property
with assert_raises(ValueError, msg="The transformer {} does "
"not raise an error when the number of "
"features in transform is different from"
" the number of features in "
"fit.".format(name)):
transformer.transform(X.T)
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if _safe_tags(estimator_orig, 'non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
X = pairwise_estimator_convert_X(X, estimator_orig)
if _safe_tags(estimator_orig, 'binary_only'):
y = np.arange(10) % 2
else:
y = np.arange(10) % 3
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert args[1] in ["y", "Y"], (
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_32 = pairwise_estimator_convert_X(X_train_32, estimator_orig)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
if _safe_tags(estimator_orig, 'binary_only'):
y[y == 2] = 1
y = enforce_estimator_tags_y(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
with assert_raises(ValueError, msg="The estimator {} does not"
" raise an error when an empty data is used "
"to train. Perhaps use "
"check_array in train.".format(name)):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = enforce_estimator_tags_y(e, np.array([1, 0, 1]))
msg = (r"0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
# Checks that Estimator X's do not contain NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),
estimator_orig)
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = enforce_estimator_tags_y(estimator_orig, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, estimator)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
tags = _safe_tags(estimator_orig)
# include NaN values when the estimator should deal with them
if tags['allow_nan']:
# set randomly 10 elements to np.nan
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if estimator.__module__.startswith('sklearn.'):
assert b"version" in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_partial_fit_n_features(name, estimator_orig):
# check if number of features changes between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with assert_raises(ValueError,
msg="The estimator {} does not raise an"
" error when the number of features"
" changes between calls to "
"partial_fit.".format(name)):
estimator.partial_fit(X[:, :-1], y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig, readonly_memmap=False):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
rng = np.random.RandomState(7)
X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
if readonly_memmap:
X, y, X_noise = create_memmap_backed_data([X, y, X_noise])
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == 'AffinityPropagation':
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert pred.shape == (n_samples,)
assert adjusted_rand_score(pred, y) > 0.4
if _safe_tags(clusterer, 'non_deterministic'):
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert pred.dtype in [np.dtype('int32'), np.dtype('int64')]
assert pred2.dtype in [np.dtype('int32'), np.dtype('int64')]
# Add noise to X to test the possible values of the labels
labels = clusterer.fit_predict(X_noise)
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
labels_sorted = np.unique(labels)
assert_array_equal(labels_sorted, np.arange(labels_sorted[0],
labels_sorted[-1] + 1))
# Labels are expected to start at 0 (no noise) or -1 (if noise)
assert labels_sorted[0] in [0, -1]
# Labels should be less than n_clusters - 1
if hasattr(clusterer, 'n_clusters'):
n_clusters = getattr(clusterer, 'n_clusters')
assert n_clusters - 1 >= labels_sorted[-1]
# else labels should be less than max(labels_) which is necessarily true
@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
set_random_state(clusterer)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
classifier = clone(classifier_orig)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig, readonly_memmap=False):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
X_m -= X_m.min()
X_b -= X_b.min()
if readonly_memmap:
X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])
problems = [(X_b, y_b)]
tags = _safe_tags(classifier_orig)
if not tags['binary_only']:
problems.append((X_m, y_m))
for (X, y) in problems:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
X = pairwise_estimator_convert_X(X, classifier)
y = enforce_estimator_tags_y(classifier, y)
set_random_state(classifier)
# raises error on malformed input for fit
if not tags["no_validation"]:
with assert_raises(
ValueError,
msg="The classifier {} does not "
"raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of labels. "
"Perhaps use check_X_y in fit.".format(name)):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert hasattr(classifier, "classes_")
y_pred = classifier.predict(X)
assert y_pred.shape == (n_samples,)
# training set performance
if not tags['poor_score']:
assert accuracy_score(y, y_pred) > 0.83
# raises error on malformed input for predict
msg_pairwise = (
"The classifier {} does not raise an error when shape of X in "
" {} is not equal to (n_test_samples, n_training_samples)")
msg = ("The classifier {} does not raise an error when the number of "
"features in {} is different from the number of features in "
"fit.")
if not tags["no_validation"]:
if _is_pairwise(classifier):
with assert_raises(ValueError,
msg=msg_pairwise.format(name, "predict")):
classifier.predict(X.reshape(-1, 1))
else:
with assert_raises(ValueError,
msg=msg.format(name, "predict")):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
if not tags["multioutput_only"]:
assert decision.shape == (n_samples,)
else:
assert decision.shape == (n_samples, 1)
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
else:
assert decision.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
if not tags["no_validation"]:
if _is_pairwise(classifier):
with assert_raises(ValueError, msg=msg_pairwise.format(
name, "decision_function")):
classifier.decision_function(X.reshape(-1, 1))
else:
with assert_raises(ValueError, msg=msg.format(
name, "decision_function")):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert y_prob.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
if not tags["no_validation"]:
# raises error on malformed input for predict_proba
if _is_pairwise(classifier_orig):
with assert_raises(ValueError, msg=msg_pairwise.format(
name, "predict_proba")):
classifier.predict_proba(X.reshape(-1, 1))
else:
with assert_raises(ValueError, msg=msg.format(
name, "predict_proba")):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
def check_outlier_corruption(num_outliers, expected_outliers, decision):
# Check for deviation from the precise given contamination level that may
# be due to ties in the anomaly scores.
if num_outliers < expected_outliers:
start = num_outliers
end = expected_outliers + 1
else:
start = expected_outliers
end = num_outliers + 1
# ensure that all values in the 'critical area' are tied,
# leading to the observed discrepancy between provided
# and actual contamination levels.
sorted_decision = np.sort(decision)
msg = ('The number of predicted outliers is not equal to the expected '
'number of outliers and this difference is not explained by the '
'number of ties in the decision_function values')
assert len(np.unique(sorted_decision[start:end])) == 1, msg
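# Illustrative sketch (not from the library): check_outlier_corruption passes
# only when the scores in the 'critical area' are tied. For instance, with
# num_outliers=2 and expected_outliers=4,
#     check_outlier_corruption(2, 4, np.array([.1, .2, .5, .5, .5, .9]))
# succeeds because sorted_decision[2:5] holds a single repeated value.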
def check_outliers_train(name, estimator_orig, readonly_memmap=True):
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
if readonly_memmap:
X = create_memmap_backed_data(X)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X)
# with lists
estimator.fit(X.tolist())
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == 'i'
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
decision = estimator.decision_function(X)
scores = estimator.score_samples(X)
for output in [decision, scores]:
assert output.dtype == np.dtype('float')
assert output.shape == (n_samples,)
# raises error on malformed input for predict
assert_raises(ValueError, estimator.predict, X.T)
# decision_function agrees with predict
    dec_pred = (decision >= 0).astype(int)
dec_pred[dec_pred == 0] = -1
assert_array_equal(dec_pred, y_pred)
# raises error on malformed input for decision_function
assert_raises(ValueError, estimator.decision_function, X.T)
# decision_function is a translation of score_samples
y_dec = scores - estimator.offset_
assert_allclose(y_dec, decision)
# raises error on malformed input for score_samples
assert_raises(ValueError, estimator.score_samples, X.T)
# contamination parameter (not for OneClassSVM which has the nu parameter)
if (hasattr(estimator, 'contamination')
and not hasattr(estimator, 'novelty')):
        # the proportion of predicted outliers should equal the contamination
        # parameter when it is not set to 'auto'. This only holds on the
        # training set, so it cannot be checked here for estimators with a
        # novelty parameter such as LocalOutlierFactor (those are covered in
        # check_outliers_fit_predict)
expected_outliers = 30
contamination = expected_outliers / n_samples
estimator.set_params(contamination=contamination)
estimator.fit(X)
y_pred = estimator.predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method, i.e. all estimators except LOF which is already
# excluded from this if branch.
if num_outliers != expected_outliers:
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
# raises error when contamination is a scalar and not in [0,1]
for contamination in [-0.5, 2.3]:
estimator.set_params(contamination=contamination)
assert_raises(ValueError, estimator.fit, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_fit_returns_self(name, estimator_orig,
readonly_memmap=False):
"""Check if self is returned when calling fit"""
if _safe_tags(estimator_orig, 'binary_only'):
n_centers = 2
else:
n_centers = 3
X, y = make_blobs(random_state=0, n_samples=9, centers=n_centers)
# some want non-negative input
X -= X.min()
X = pairwise_estimator_convert_X(X, estimator_orig)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
set_random_state(estimator)
assert estimator.fit(X, y) is estimator
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
# Common test for Regressors, Classifiers and Outlier detection estimators
X, y = _boston_subset()
estimator = clone(estimator_orig)
for method in ('decision_function', 'predict', 'predict_proba',
'predict_log_proba'):
if hasattr(estimator, method):
assert_raises(NotFittedError, getattr(estimator, method), X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
if tags['multioutput_only']:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)), estimator_orig)
if tags['binary_only']:
y = np.arange(10) % 2
else:
y = np.arange(10) % 3
y = enforce_estimator_tags_y(estimator_orig, y)
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if not tags['multioutput']:
# check that we warned if we don't support multi-output
assert len(w) > 0, msg
assert "DataConversionWarning('A column-vector y" \
" was passed when a 1d array was expected" in msg
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig):
classes = np.unique(y)
classifier = clone(classifier_orig)
if name == 'BernoulliNB':
X = X > X.mean()
set_random_state(classifier)
classifier.fit(X, y)
y_pred = classifier.predict(X)
if hasattr(classifier, "decision_function"):
decision = classifier.decision_function(X)
assert isinstance(decision, np.ndarray)
if len(classes) == 2:
            dec_pred = (decision.ravel() > 0).astype(int)
dec_exp = classifier.classes_[dec_pred]
assert_array_equal(dec_exp, y_pred,
err_msg="decision_function does not match "
"classifier for %r: expected '%s', got '%s'" %
(classifier, ", ".join(map(str, dec_exp)),
", ".join(map(str, y_pred))))
elif getattr(classifier, 'decision_function_shape', 'ovr') == 'ovr':
decision_y = np.argmax(decision, axis=1).astype(int)
y_exp = classifier.classes_[decision_y]
assert_array_equal(y_exp, y_pred,
err_msg="decision_function does not match "
"classifier for %r: expected '%s', got '%s'" %
(classifier, ", ".join(map(str, y_exp)),
", ".join(map(str, y_pred))))
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
# For some specific cases 'ComplementNB' predicts less classes
# than expected
assert_array_equal(np.unique(y), np.unique(y_pred))
assert_array_equal(classes, classifier.classes_,
err_msg="Unexpected classes_ attribute for %r: "
"expected '%s', got '%s'" %
(classifier, ", ".join(map(str, classes)),
", ".join(map(str, classifier.classes_))))
def choose_check_classifiers_labels(name, y, y_names):
return y if name in ["LabelPropagation", "LabelSpreading"] else y_names
def check_classifiers_classes(name, classifier_orig):
X_multiclass, y_multiclass = make_blobs(n_samples=30, random_state=0,
cluster_std=0.1)
X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass,
random_state=7)
X_multiclass = StandardScaler().fit_transform(X_multiclass)
# We need to make sure that we have non negative data, for things
# like NMF
X_multiclass -= X_multiclass.min() - .1
X_binary = X_multiclass[y_multiclass != 2]
y_binary = y_multiclass[y_multiclass != 2]
X_multiclass = pairwise_estimator_convert_X(X_multiclass, classifier_orig)
X_binary = pairwise_estimator_convert_X(X_binary, classifier_orig)
labels_multiclass = ["one", "two", "three"]
labels_binary = ["one", "two"]
y_names_multiclass = np.take(labels_multiclass, y_multiclass)
y_names_binary = np.take(labels_binary, y_binary)
problems = [(X_binary, y_binary, y_names_binary)]
if not _safe_tags(classifier_orig, 'binary_only'):
problems.append((X_multiclass, y_multiclass, y_names_multiclass))
for X, y, y_names in problems:
for y_names_i in [y_names, y_names.astype('O')]:
y_ = choose_check_classifiers_labels(name, y, y_names_i)
check_classifiers_predictions(X, y_, name, classifier_orig)
labels_binary = [-1, 1]
y_names_binary = np.take(labels_binary, y_binary)
y_binary = choose_check_classifiers_labels(name, y_binary, y_names_binary)
check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_int(name, regressor_orig):
X, _ = _boston_subset()
X = pairwise_estimator_convert_X(X[:50], regressor_orig)
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = enforce_estimator_tags_y(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
    regressor_2.fit(X, y_.astype(float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig, readonly_memmap=False):
X, y = _boston_subset()
X = pairwise_estimator_convert_X(X, regressor_orig)
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
regressor = clone(regressor_orig)
y = enforce_estimator_tags_y(regressor, y)
if name in CROSS_DECOMPOSITION:
rnd = np.random.RandomState(0)
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
if readonly_memmap:
X, y, y_ = create_memmap_backed_data([X, y, y_])
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
    with assert_raises(ValueError, msg="The regressor {} does not"
" raise an error when incorrect/malformed input "
"data for fit is passed. The number of training "
"examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit.".format(name)):
regressor.fit(X, y[:-1])
# fit
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert y_pred.shape == y_.shape
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if not _safe_tags(regressor, "poor_score"):
assert regressor.score(X, y_) > 0.5
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
regressor = clone(regressor_orig)
y = enforce_estimator_tags_y(regressor, X[:, 0])
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_classifiers(name, classifier_orig):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest("Not testing NuSVC class weight as it is ignored.")
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
if _safe_tags(classifier_orig, 'binary_only'):
problems = [2]
else:
problems = [2, 3]
for n_centers in problems:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# can't use gram_if_pairwise() here, setting up gram matrix manually
if _is_pairwise(classifier_orig):
X_test = rbf_kernel(X_test, X_train)
X_train = rbf_kernel(X_train, X_train)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(
class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
if hasattr(classifier, "n_iter_no_change"):
classifier.set_params(n_iter_no_change=20)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
assert np.mean(y_pred == 0) > 0.87
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
y_train, X_test, y_test, weights):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert (f1_score(y_test, y_pred_balanced, average='weighted') >
f1_score(y_test, y_pred, average='weighted'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small to reach convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, 'cv'):
classifier.set_params(cv=3)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(coef_balanced, coef_manual,
err_msg="Classifier %s is not computing"
" class_weight=balanced properly."
% name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_overwrite_params(name, estimator_orig):
if _safe_tags(estimator_orig, 'binary_only'):
n_centers = 2
else:
n_centers = 3
X, y = make_blobs(random_state=0, n_samples=9, centers=n_centers)
# some want non-negative input
X -= X.min()
X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert joblib.hash(new_value) == joblib.hash(original_value), (
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_no_attributes_set_in_init(name, estimator):
"""Check setting during init. """
if hasattr(type(estimator).__init__, "deprecated_original"):
return
init_params = _get_args(type(estimator).__init__)
if IS_PYPY:
# __init__ signature has additional objects in PyPy
for key in ['obj']:
if key in init_params:
init_params.remove(key)
parents_init_params = [param for params_parent in
(_get_args(parent) for parent in
type(estimator).__mro__)
for param in params_parent]
# Test for no setting apart from parameters during init
invalid_attr = (set(vars(estimator)) - set(init_params)
- set(parents_init_params))
assert not invalid_attr, (
"Estimator %s should not set any attribute apart"
" from parameters during init. Found attributes %s."
% (name, sorted(invalid_attr)))
# Ensure that each parameter is set in init
invalid_attr = set(init_params) - set(vars(estimator)) - {"self"}
assert not invalid_attr, (
"Estimator %s should store all parameters"
" as an attribute during init. Did not find "
"attributes %s."
% (name, sorted(invalid_attr)))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
X = pairwise_estimator_convert_X(X, estimator_orig)
y = [1, 1, 1, 2, 2, 2]
y = enforce_estimator_tags_y(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _boston_subset(n_samples=50)
X = pairwise_estimator_convert_X(X, estimator_orig)
y = enforce_estimator_tags_y(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest("Skipping check_estimators_data_not_an_array "
"for cross decomposition module as estimators "
"are not deterministic.")
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# this check works on classes, not instances
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
required_parameters = getattr(Estimator, "_required_parameters", [])
if required_parameters:
if required_parameters in (["base_estimator"], ["estimator"]):
if issubclass(Estimator, RegressorMixin):
estimator = Estimator(Ridge())
else:
estimator = Estimator(LinearDiscriminantAnalysis())
else:
raise SkipTest("Can't instantiate estimator {} which"
" requires parameters {}".format(
name, required_parameters))
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert estimator.set_params() is estimator
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if required_parameters == ["estimator"]:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert init_param.default != init_param.empty, (
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
if type(init_param.default) is type:
assert init_param.default in [np.float64, np.int64]
else:
assert (type(init_param.default) in
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, joblib.Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert init_param.default is None
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
if is_scalar_nan(param_value):
# Allows to set default parameters to np.nan
assert param_value is init_param.default, init_param.name
else:
assert param_value == init_param.default, init_param.name
def enforce_estimator_tags_y(estimator, y):
    # Estimators with a `requires_positive_y` tag only accept strictly
    # positive target values
if _safe_tags(estimator, "requires_positive_y"):
# Create strictly positive y. The minimal increment above 0 is 1, as
# y could be of integer dtype.
y += 1 + abs(y.min())
    # Estimators with the multioutput_only tag raise ValueError on 1-D y.
    # Convert y into a 2-D array for those estimators.
if _safe_tags(estimator, "multioutput_only"):
return np.reshape(y, (-1, 1))
return y
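# Illustrative behaviour of enforce_estimator_tags_y (example values only):
# with the requires_positive_y tag, y = np.array([-2, 0, 3]) becomes
# [1, 3, 6]; with the multioutput_only tag, y of shape (n,) is returned
# with shape (n, 1).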
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that non-transformer estimators exposing a max_iter parameter
    # set the n_iter_ attribute to at least 1 after fitting.
    # The models below depend on external solvers such as libsvm, where
    # accessing the iteration count is non-trivial, so they are skipped.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = clone(estimator_orig).set_params(alpha=0.)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = enforce_estimator_tags_y(estimator, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert estimator.n_iter_ >= 1
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers exposing a max_iter parameter set the
    # n_iter_ attribute to at least 1 after fitting.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert iter_ >= 1
else:
assert estimator.n_iter_ >= 1
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert all(item in deep_params.items() for item in
shallow_params.items())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_set_params(name, estimator_orig):
# Check that get_params() returns the same thing
# before and after set_params() with some fuzz
estimator = clone(estimator_orig)
orig_params = estimator.get_params(deep=False)
msg = ("get_params result does not match what was passed to set_params")
estimator.set_params(**orig_params)
curr_params = estimator.get_params(deep=False)
assert set(orig_params.keys()) == set(curr_params.keys()), msg
for k, v in curr_params.items():
assert orig_params[k] is v, msg
# some fuzz values
test_values = [-np.inf, np.inf, None]
test_params = deepcopy(orig_params)
for param_name in orig_params.keys():
default_value = orig_params[param_name]
for value in test_values:
test_params[param_name] = value
try:
estimator.set_params(**test_params)
except (TypeError, ValueError) as e:
e_type = e.__class__.__name__
# Exception occurred, possibly parameter validation
warnings.warn("{0} occurred during set_params of param {1} on "
"{2}. It is recommended to delay parameter "
"validation until fit.".format(e_type,
param_name,
name))
change_warning_msg = "Estimator's parameters changed after " \
"set_params raised {}".format(e_type)
params_before_exception = curr_params
curr_params = estimator.get_params(deep=False)
try:
assert (set(params_before_exception.keys()) ==
set(curr_params.keys()))
for k, v in curr_params.items():
assert params_before_exception[k] is v
except AssertionError:
warnings.warn(change_warning_msg)
else:
curr_params = estimator.get_params(deep=False)
assert (set(test_params.keys()) ==
set(curr_params.keys())), msg
for k, v in curr_params.items():
assert test_params[k] is v, msg
test_params[param_name] = default_value
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = clone(estimator_orig)
msg = 'Unknown label type: '
if not _safe_tags(e, "no_validation"):
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = clone(estimator_orig)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
# Since the link function from decision_function() to predict_proba()
# is sometimes not precise enough (typically expit), we round to the
# 10th decimal to avoid numerical issues.
a = estimator.predict_proba(X_test)[:, 1].round(decimals=10)
b = estimator.decision_function(X_test).round(decimals=10)
assert_array_equal(rankdata(a), rankdata(b))
def check_outliers_fit_predict(name, estimator_orig):
# Check fit_predict for outlier detectors.
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
y_pred = estimator.fit_predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == 'i'
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
# check fit_predict = fit.predict when the estimator has both a predict and
# a fit_predict method. recall that it is already assumed here that the
# estimator has a fit_predict method
if hasattr(estimator, 'predict'):
y_pred_2 = estimator.fit(X).predict(X)
assert_array_equal(y_pred, y_pred_2)
if hasattr(estimator, "contamination"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'
expected_outliers = 30
contamination = float(expected_outliers)/n_samples
estimator.set_params(contamination=contamination)
y_pred = estimator.fit_predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method
if (num_outliers != expected_outliers and
hasattr(estimator, 'decision_function')):
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
# raises error when contamination is a scalar and not in [0,1]
for contamination in [-0.5, 2.3]:
estimator.set_params(contamination=contamination)
assert_raises(ValueError, estimator.fit_predict, X)
def check_fit_idempotent(name, estimator_orig):
# Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would
# check that the estimated parameters during training (e.g. coefs_) are
# the same, but having a universal comparison function for those
# attributes is difficult and full of edge cases. So instead we check that
# predict(), predict_proba(), decision_function() and transform() return
# the same results.
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params().keys():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = enforce_estimator_tags_y(estimator, y)
train, test = next(ShuffleSplit(test_size=.2, random_state=rng).split(X))
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
# Fit for the first time
estimator.fit(X_train, y_train)
result = {method: getattr(estimator, method)(X_test)
for method in check_methods
if hasattr(estimator, method)}
# Fit again
set_random_state(estimator)
estimator.fit(X_train, y_train)
for method in check_methods:
if hasattr(estimator, method):
new_result = getattr(estimator, method)(X_test)
if np.issubdtype(new_result.dtype, np.floating):
tol = 2*np.finfo(new_result.dtype).eps
else:
tol = 2*np.finfo(np.float64).eps
assert_allclose_dense_sparse(
result[method], new_result,
atol=max(tol, 1e-9), rtol=max(tol, 1e-7),
err_msg="Idempotency check failed for method {}".format(method)
)
| bsd-3-clause |
ioam/geoviews | geoviews/plotting/__init__.py | 1 | 1407 | from holoviews import Store, extension
from holoviews.core.options import Compositor
from holoviews.operation.element import contours
from ..element import Contours, Polygons
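# Plotting backends are registered lazily when HoloViews exposes
# register_backend_callback; otherwise they are imported eagerly below.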
if hasattr(extension, 'register_backend_callback'):
def _load_bokeh():
from . import bokeh # noqa
extension.register_backend_callback('bokeh', _load_bokeh)
def _load_mpl():
from . import mpl # noqa
extension.register_backend_callback('matplotlib', _load_mpl)
backends = Store.loaded_backends()
if 'bokeh' in backends:
_load_bokeh()
if 'matplotlib' in backends:
_load_mpl()
else:
try:
from . import mpl # noqa
except ImportError:
pass
try:
from . import bokeh # noqa
except ImportError:
pass
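# The compositors below let the generic contours operation produce the
# geographic Contours and Polygons elements on both bokeh and matplotlib.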
Compositor.register(Compositor("LineContours", contours, None,
'data', transfer_options=True,
transfer_parameters=True,
output_type=Contours,
backends=['bokeh', 'matplotlib']))
Compositor.register(Compositor("FilledContours", contours.instance(filled=True),
None, 'data', transfer_options=True,
transfer_parameters=True,
output_type=Polygons,
backends=['bokeh', 'matplotlib']))
| bsd-3-clause |
backtou/longlab | gr-utils/src/python/plot_data.py | 17 | 5768 | #
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_data:
def __init__(self, datatype, filenames, options):
self.hfile = list()
self.legend_text = list()
for f in filenames:
self.hfile.append(open(f, "r"))
self.legend_text.append(f)
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = datatype
self.sizeof_data = datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_f.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self, hfile):
self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
try:
f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.f = scipy.array(f)
self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.f))])
def make_plots(self):
self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_f = list()
maxval = -1e12
minval = 1e12
for hf in self.hfile:
# if specified on the command-line, set file pointer
hf.seek(self.sizeof_data*self.start, 1)
self.get_data(hf)
# Subplot for real and imaginary parts of signal
self.plot_f += plot(self.time, self.f, 'o-')
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
draw()
def update_plots(self):
maxval = -1e12
minval = 1e12
for hf,p in zip(self.hfile,self.plot_f):
self.get_data(hf)
p.set_data([self.time, self.f])
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
draw()
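    # Navigation: space/right/down arrows (and the ">" button) step forward
    # through the file, while left/up arrows (and the "<" button) step back.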
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.update_plots()
def step_backward(self):
for hf in self.hfile:
# Step back in file position
if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
hf.seek(-2*self.sizeof_data*self.block_length, 1)
else:
hf.seek(-hf.tell(),1)
self.update_plots()
def find(item_in, list_search):
try:
        return list_search.index(item_in) is not None
except ValueError:
return False
| gpl-3.0 |
alexeyum/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
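# staged_predict yields the ensemble prediction after each boosting iteration,
# which lets us trace the test error as trees are added.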
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/tests/test_dummy.py | 129 | 17774 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
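# Helper: fit on 1d and 2d targets and check that predictions keep y's shape.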
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
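# Helper: the dummy prediction should equal the fitted statistic tiled across
# samples, on both the training and the test set.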
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
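# The Hilbert matrix is notoriously ill-conditioned, which is why the coefficients
# oscillate wildly once the regularization strength alpha becomes very small.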
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
bhargav/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 17 | 3115 | from nose.tools import assert_equal
import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
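    # Record every call's positional and keyword arguments in the caller-supplied
    # stores so the tests below can assert exactly what FunctionTransformer passed in.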
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=1))
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
cheeseywhiz/cheeseywhiz | math/Taylor Series/main.py | 1 | 1884 | #!/usr/bin/python3
import math
import sys
import inspect
import matplotlib.pyplot as plt
import differentiable
xmin, xmax = -2, 4
ymin, ymax = -4, 4
h = 1 / 1000
dx = differentiable.equations()
def taylor(f, center, order):
"""Plot a Taylor polynomial and its parent function
taylor(differentiable equation f, float center, int order)
"""
coef = [f(center, i) / math.factorial(i) for i in range(order + 1)]
domain = [n * h for n in range(int(xmin / h), int(xmax / h) + 1)
if differentiable.in_domain(f, n * h)]
original = list(zip(*[(x, f(x)) for x in domain if ymin <= f(x) <= ymax]))
dec = differentiable.evaluate_coefficients
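    # dec(coef, t) is assumed to evaluate the polynomial sum_i coef[i] * t**i;
    # with t = x - center this yields the Taylor approximation at x.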
approximation = list(zip(*[(x, dec(coef, x - center))
for x in domain
if ymin <= dec(coef, x - center) <= ymax]))
plt.clf()
# graph will be at least as big as the specified domain and range
plt.plot([xmin, xmax], [ymin, ymax], 'w')
plt.plot(original[0], original[1], 'b')
plt.plot(approximation[0], approximation[1], 'r')
plt.show()
if __name__ == '__main__':
def share_element(x, y):
for elem in x:
if elem in y:
return True
return False
help_args = ['help', '-h', '-help', '/?']
    if share_element(sys.argv, help_args) or len(sys.argv) == 1:
print(taylor.__doc__)
print("""'main.py f center order' executes taylor(f,center,order)
Or run 'main.py taylor(f,center,order)'\n""")
print([pair[0]
for pair in inspect.getmembers(dx)
if isinstance(pair[1], type(dx.sin))])
sys.exit()
if len(sys.argv) == 2:
eval(sys.argv[1])
sys.exit()
if len(sys.argv) > 2:
eval('taylor(dx.' + sys.argv[1] + ',' +
sys.argv[2] + ',' + sys.argv[3] + ')')
sys.exit()
| mit |
AstroVPK/kali | python/kali/util/mpl_settings.py | 2 | 4994 | # -*- coding: utf-8 -*-
import matplotlib as mpl
def set_plot_params(fontfamily='serif', fontstyle='normal', fontvariant='normal', fontweight='normal',
fontstretch='normal', fontsize=20, useTex='False'):
# make ticks longer and thicker
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['xtick.minor.size'] = 6
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['xtick.minor.width'] = 2
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['ytick.minor.size'] = 6
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['ytick.minor.width'] = 2
# make border thicker
mpl.rcParams['axes.linewidth'] = 2
# make plotted lines thicker
mpl.rcParams['lines.linewidth'] = 2
# make fonts bigger
mpl.rcParams['xtick.labelsize'] = fontsize
mpl.rcParams['ytick.labelsize'] = fontsize
mpl.rcParams['legend.fontsize'] = fontsize
mpl.rcParams['axes.titlesize'] = fontsize
mpl.rcParams['axes.labelsize'] = fontsize
# save figure settings
mpl.rcParams['savefig.bbox'] = 'tight'
# set font properties
mpl.rcParams['font.family'] = fontfamily
mpl.rcParams['font.style'] = fontstyle # 'normal', 'italic','oblique'
mpl.rcParams['font.variant'] = fontvariant # 'normal', 'small-caps'
# 'light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black'
mpl.rcParams['font.weight'] = fontweight
# ‘ultra-condensed’, ‘extra-condensed’, ‘condensed’, ‘semi-condensed’, ‘normal’, ‘semi-expanded’,
# ‘expanded’, ‘extra-expanded’, ‘ultra-expanded’
mpl.rcParams['font.stretch'] = fontstretch
mpl.rcParams[
'font.size'] = fontsize # ['xx-small', 'x-small', 'small', 'medium', 'large','x-large', 'xx-large']
mpl.rcParams['font.serif'] = ['Times', 'Times New Roman', 'Palatino', 'Bitstream Vera Serif',
'New Century Schoolbook', 'Century Schoolbook L', 'Utopia', 'ITC Bookman',
'Bookman', 'Nimbus Roman No9 L', 'Charter', 'serif']
mpl.rcParams['font.sans-serif'] = ['Bitstream Vera Sans', 'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif']
mpl.rcParams['font.cursive'] = ['Apple Chancery', 'Textile', 'Zapf Chancery', 'Sand', 'cursive']
mpl.rcParams['font.fantasy'] = ['Comic Sans MS', 'Chicago', 'Charcoal', 'Impact', 'Western', 'fantasy']
mpl.rcParams['font.monospace'] = ['Bitstream Vera Sans Mono', 'Andale Mono', 'Nimbus Mono L',
'Courier New', 'Courier', 'Fixed', 'Terminal', 'monospace']
mpl.rcParams['text.usetex'] = useTex
# set math mode font properties
mpl.rcParams['mathtext.cal'] = 'cursive'
mpl.rcParams['mathtext.rm'] = 'serif'
mpl.rcParams['mathtext.tt'] = 'monospace'
mpl.rcParams['mathtext.it'] = 'serif:italic'
mpl.rcParams['mathtext.bf'] = 'serif:bold'
mpl.rcParams['mathtext.sf'] = 'sans'
mpl.rcParams['mathtext.fontset'] = 'cm' # Should be 'cm' (Computer Modern), 'stix','stixsans' or 'custom'
mpl.rcParams['mathtext.fallback_to_cm'] = 'True' # When True, use symbols from the Computer Modern fonts
# when a symbol can not be found in one of the custom math fonts.
mpl.rcParams['mathtext.default'] = 'rm' # The default font to use for math. Can be any of the LaTeX font
# names, including the special name "regular" for the same font used in regular text.
mpl.rcParams['pdf.fonttype'] = 42 # Force matplotlib to use Type42 (a.k.a. TrueType) fonts for .pdf
mpl.rcParams['ps.fonttype'] = 42 # Force matplotlib to use Type42 (a.k.a. TrueType) fonts for .eps
plot_params = dict()
plot_params['goldenRatio'] = 1.61803398875
plot_params['fhgt'] = 10.0
plot_params['fwid'] = plot_params['fhgt']*plot_params['goldenRatio']
plot_params['dpi'] = 300
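# Figure defaults above: height 10 in, width = height * golden ratio (~16.2 in), 300 dpi.
# The remaining entries are font sizes (in points) for annotations, legends, labels, and axes.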
plot_params['AnnotateXXLarge'] = 72
plot_params['AnnotateXLarge'] = 48
plot_params['AnnotateLarge'] = 32
plot_params['AnnotateMedium'] = 28
plot_params['AnnotateSmall'] = 24
plot_params['AnnotateXSmall'] = 20
plot_params['AnnotateXXSmall'] = 16
plot_params['LegendLarge'] = 24
plot_params['LegendMedium'] = 20
plot_params['LegendSmall'] = 16
plot_params['LegendXSmall'] = 12
plot_params['LegendXXSmall'] = 10
plot_params['LegendXXXSmall'] = 8
plot_params['LabelXLarge'] = 32
plot_params['LabelLarge'] = 28
plot_params['LabelMedium'] = 24
plot_params['LabelSmall'] = 20
plot_params['LabelXSmall'] = 16
plot_params['AxisXXLarge'] = 32
plot_params['AxisXLarge'] = 28
plot_params['AxisLarge'] = 24
plot_params['AxisMedium'] = 20
plot_params['AxisSmall'] = 16
plot_params['AxisXSmall'] = 12
plot_params['AxisXXSmall'] = 8
plot_params['normalFontSize'] = 32
plot_params['smallFontSize'] = 24
plot_params['footnoteFontSize'] = 20
plot_params['scriptFontSize'] = 16
plot_params['tinyFontSize'] = 12
| gpl-2.0 |
mlyundin/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
lsst-dm/great3-public | inputs/galdata/combine_image_info.py | 2 | 4085 | # Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This script takes the outputs of `training_galaxy_props_real.py` for 6 different PSFs defined in
`run_props.py`, and combines them into a single file that is used by the GREAT3 simulation scripts
to select galaxies for which the use of real galaxy images is not problematic in some way.
"""
import pyfits
import numpy as np
import matplotlib.pyplot as plt
import os
# Define filenames, etc.
property_files = ['real_galaxy_catalog_23.5_real_props_Euclid_0.05.fits',
'real_galaxy_catalog_23.5_real_props_Euclid_0.10.fits',
'real_galaxy_catalog_23.5_real_props_Kolm_0.5_0.2.fits',
'real_galaxy_catalog_23.5_real_props_Kolm_0.65_0.2.fits',
'real_galaxy_catalog_23.5_real_props_Kolm_0.8_0.2.fits',
'real_galaxy_catalog_23.5_real_props_Kolm_0.95_0.2.fits']
sn_index = 3 # index of file on list to use to get S/N in original image: should be 2-5, since did
# not calculate for 0-1. (It's the original image, so we only really needed to do it
# once.)
exclude_file = []
n_files = len(property_files)
out_dir = './'
out_filename = 'real_galaxy_image_selection_info.fits'
# Read in the list of properties from `training_galaxy_props_real.py` for all the different options
# for PSF and pixel scale.
props_list = []
for filename in property_files:
props_cat = pyfits.getdata(filename)
n_gal = len(props_cat)
print 'Read in properties from ',filename,' for ',n_gal
props_list.append(props_cat)
# Info to save for all galaxies:
sn_ellip_gauss = props_list[sn_index]['sn_ellip_gauss']
min_var_white = np.zeros((n_gal,n_files-len(exclude_file)))
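# One column of minimum whitening-noise variance per retained input file; any
# files listed in exclude_file are skipped when filling the columns below.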
i_file_use = 0
for i_file in range(n_files):
if i_file not in exclude_file:
min_var_white[:,i_file_use] = props_list[i_file]['min_var_white']
i_file_use += 1
# Save results to file.
tbhdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='sn_ellip_gauss',
format='D',
array=sn_ellip_gauss),
pyfits.Column(name='min_var_white',
format='6D',
array=min_var_white)]
))
# Write outputs.
outfile = os.path.join(out_dir, out_filename)
print "Writing to file ",outfile
tbhdu.writeto(outfile, clobber=True)
| bsd-3-clause |
huzq/scikit-learn | sklearn/tests/test_build.py | 17 | 1175 | import os
import pytest
import textwrap
from sklearn import __version__
from sklearn.utils._openmp_helpers import _openmp_parallelism_enabled
def test_openmp_parallelism_enabled():
# Check that sklearn is built with OpenMP-based parallelism enabled.
# This test can be skipped by setting the environment variable
# ``SKLEARN_SKIP_OPENMP_TEST``.
if os.getenv("SKLEARN_SKIP_OPENMP_TEST"):
pytest.skip("test explicitly skipped (SKLEARN_SKIP_OPENMP_TEST)")
base_url = "dev" if __version__.endswith(".dev0") else "stable"
err_msg = textwrap.dedent(
"""
This test fails because scikit-learn has been built without OpenMP.
This is not recommended since some estimators will run in sequential
mode instead of leveraging thread-based parallelism.
You can find instructions to build scikit-learn with OpenMP at this
address:
https://scikit-learn.org/{}/developers/advanced_installation.html
You can skip this test by setting the environment variable
SKLEARN_SKIP_OPENMP_TEST to any value.
""").format(base_url)
assert _openmp_parallelism_enabled(), err_msg
| bsd-3-clause |
okadate/romspy | romspy/get_wind.py | 1 | 8150 | # coding: utf-8
# (c) 2015-11-18 Teruhisa Okada
import urllib
import urllib2
import datetime
from bs4 import BeautifulSoup
import re
def get_mp_wind(outdir, year=2012, stations=None, items=['A','B','C']):
url_tmp = 'http://222.158.204.199/obweb/data/c1/c1_csv_output.aspx?dataid={0:03d}&soutitypeid={1}&from={2}&to={3}'
outfile_tmp = outdir + 'mp_{0:03d}_{1}_{2}_{3}.csv'
if stations is None:
stations = [i+1 for i in range(13)]
for station in stations:
print 'downloading mp sta.{}'.format(station)
#d_start = datetime.datetime(year, 1, 1, 0)
#d_end = datetime.datetime(year, 12, 31, 0)
d_start = datetime.date(year-1, 12, 31)
d_end = datetime.date(year+1, 1, 1)
s_start = d_start.strftime('%Y%m%d')
s_end = d_end.strftime('%Y%m%d')
for item in items:
url = url_tmp.format(station, item, s_start, s_end)
outfile = outfile_tmp.format(station, item, s_start, s_end)
urllib.urlretrieve(url, outfile)
def get_jma_wind(outdir, year=2012, stations=None):
outfile = outdir + 'jma_{}_{}_{}.csv'
url = {}
url_tmp = 'http://www.data.jma.go.jp/obd/stats/etrn/view/hourly_{}.php?prec_no={}&block_no={}&year=%d&month=%d&day=%d&view='
url['osaka'] = url_tmp.format('s1', 62, 47772)
#url['hirakata'] = url_tmp.format('a1', 62, 1065)
url['sakai'] = url_tmp.format('a1', 62, 1062)
url['toyonaka'] = url_tmp.format('a1', 62, '0602')
url['kumatori'] = url_tmp.format('a1', 62, '0606')
url['kansaiAP'] = url_tmp.format('a1', 62, 1471)
url['kobe'] = url_tmp.format('s1', 63, 47770)
url['kobeAP'] = url_tmp.format('a1', 63, 1587)
url['gunge'] = url_tmp.format('a1', 63, '0970')
#url['awaji'] = url_tmp.format('a1', 63, 1448)
#url['nishimoniya'] = url_tmp.format('a1', 63, 1588)
url['sumoto'] = url_tmp.format('s1', 63, 47776)
url['akashi'] = url_tmp.format('a1', 63, '0625')
url['wakayama'] = url_tmp.format('s1', 65, 47777)
url['tomogashima'] = url_tmp.format('a1', 65, 1457)
#d_start = datetime.date(year, 1, 1)
#d_end = datetime.date(year, 12, 31)
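    # Pad the requested year by one day on each side; the JMA tables report an hour
    # "24" that rolls over into the following day (handled below), so the padding is
    # presumably there to keep the year boundaries complete.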
d_start = datetime.date(year-1, 12, 31)
d_end = datetime.date(year+1, 1, 1)
s_start = d_start.strftime('%Y%m%d')
s_end = d_end.strftime('%Y%m%d')
dates = [d_start + datetime.timedelta(days=t) for t in range(368)]
for sname in url.keys():
print 'downloading jma {}'.format(sname)
lists = []
for date in dates:
url1 = url[sname] % (date.year, date.month, date.day)
#print url1
html = urllib2.urlopen(url1).read()
#print html
soup = BeautifulSoup(html, "lxml")
trs = soup.find('table', {'class':'data2_s'})
for tr in trs.findAll('tr')[2:]:
dic = []
tds = tr.findAll('td')
if tds[1].string is None:
break
if re.search('s1',url1):
data_names = ['id','date','hour','air_pressure','precipitation','temperature','humidity','wind_velocity','wind_direction','sun_time','radiation','cloud']
#ota
if unicode(tds[0].string) ==unicode('24','shift_jis'):
tds[0] ='0'
datef = date+datetime.timedelta(days=1)
dic.append(datef.strftime("%Y/%m/%d") + ' ' + tds[0].zfill(2) + ":00")
dic.append(datef.strftime("%Y/%m/%d"))
dic.append(_str2float(tds[0]))
else:
dic.append(date.strftime("%Y/%m/%d") + ' ' + tds[0].string.zfill(2) + ":00")
dic.append(date.strftime("%Y/%m/%d"))
dic.append(_str2float(tds[0].string))
#print tds[0].string,type(tds[0].string)
dic.append(_str2float(tds[2].string)) # 気圧
dic.append(_str2float(tds[3].string)) # rain
dic.append(_str2float(tds[4].string)) # 気温
dic.append(_str2float(tds[7].string)) # 湿度
dic.append(_str2float(tds[8].string)) # wind
#dic.append(_str2string(tds[9].string)) # direction
dic.append(_mod_corruption(_str2string(tds[9].string))) # direction
if tds[10].string == "" or tds[5].string == "--":
dic.append(0.0)
else:
dic.append(_str2float(tds[10].string)) # 日照時間
# dic.append(_str2float(tds[10].string)) # 日照時間
dic.append(_str2float(tds[11].string)) # swrad
cloud_ = re.match("\d*",_str2string(tds[15].string))
dic.append(cloud_.group()) # cloud
else:
data_names = ['id','date','hour','precipitation','temperature','wind_velocity','wind_direction','sun_time']
if unicode(tds[0].string) ==unicode('24','shift_jis'):
tds[0] ='0'
datef = date+datetime.timedelta(days=1)
dic.append(datef.strftime("%Y/%m/%d") + ' ' + tds[0].zfill(2) + ":00")
dic.append(datef.strftime("%Y/%m/%d"))
dic.append(_str2float(tds[0]))
else:
dic.append(date.strftime("%Y/%m/%d") + ' ' + tds[0].string.zfill(2) + ":00")
dic.append(date.strftime("%Y/%m/%d"))
dic.append(_str2float(tds[0].string))
dic.append(_str2float(tds[1].string)) # rain
dic.append(_str2float(tds[2].string)) # temp
dic.append(_str2float(tds[3].string)) # wind
#dic.append(_str2string(tds[4].string)) # direction
dic.append(_mod_corruption(_str2string(tds[4].string))) # direction
if tds[5].string == "" or tds[5].string == "--":
dic.append(0.0)
else:
dic.append(_str2float(tds[5].string)) # daytime
lists.append(dic)
import pandas as pd
df = pd.DataFrame(data=lists, columns=data_names)
df.index = df.id
df.to_csv(outfile.format(sname, s_start, s_end), encoding='shift_jis', index=None)
def _str2float(str):
if str:
try:
return float(str)
except:
return '--'
else:
return '--'
def _str2string(str):
if str:
try:
return str.string
except:
return '--'
else:
return '--'
def _mod_corruption(str1):
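    # Map a Japanese 16-point compass label to degrees clockwise from north in
    # 22.5-degree steps (北 = 0, 北北東 = 22.5, ..., 北北西 = 337.5); 静穏 ("calm")
    # falls through to 16 * 22.5 = 360, and anything unrecognised becomes '--'.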
str1 = unicode(str1)
delta = 22.5
if str1 == u'北':
str2 = delta * 0
elif str1 == u'北北東':
str2 = delta * 1
elif str1 == u'北東':
str2 = delta * 2
elif str1 == u'東北東':
str2 = delta * 3
elif str1 == u'東':
str2 = delta * 4
elif str1 == u'東南東':
str2 = delta * 5
elif str1 == u'南東':
str2 = delta * 6
elif str1 == u'南南東':
str2 = delta * 7
elif str1 == u'南':
str2 = delta * 8
elif str1 == u'南南西':
str2 = delta * 9
elif str1 == u'南西':
str2 = delta * 10
elif str1 == u'西南西':
str2 = delta * 11
elif str1 == u'西':
str2 = delta * 12
elif str1 == u'西北西':
str2 = delta * 13
elif str1 == u'北西':
str2 = delta * 14
elif str1 == u'北北西':
str2 = delta * 15
elif str1 == u'静穏':
str2 = delta * 16
else:
str2 = '--'
return str2
if __name__ == '__main__':
get_mp_wind('F:/okada/Data/mp/', year=2012, items=['C'])
get_jma_wind('F:/okada/Data/jma/', year=2012)
| mit |
boland1992/seissuite_iran | build/lib/seissuite/misc/event_mag.py | 8 | 1891 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 08:44:28 2015
@author: boland
"""
import numpy as np
import obspy
from obspy import UTCDateTime, read
import datetime
from obspy.fdsn import Client
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# set server name. United States Geological Survey.
client = Client("IRIS")
# get current UTC date in "yy-mm-dd" format and set that as the end of the searched time-window
date_end = datetime.datetime.utcnow().date();
#convert end date from datetime object to a UTCDateTime object
end = UTCDateTime(date_end)
# set the time period to scan. In this case we're looking at the previous 1000 days
no_of_days = 1000.0
# define time difference as a datetime object
number_days = datetime.timedelta(days=no_of_days)
# set start date for the time-window as the current date minus the number of days set
date_start = date_end - number_days
#convert start date from datetime object to a UTCDateTime object
start = UTCDateTime(date_start)
# set minimum magnitude threshold to search for.
min_mag = 2.0
minlat = -39.1 #minlatitude
maxlat = -37.0 # maxlatitude
minlon = 143.5 #minlongitude
maxlon = 147.0 #maxlongitude
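# The bounding box above covers southern Victoria, Australia (apparently the Gippsland
# study region, judging from the commented-out output filename further below).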
cat = client.get_events(starttime=date_start,
endtime=date_end,
minlatitude=minlat,
maxlatitude=maxlat,
minlongitude=minlon,
maxlongitude=maxlon,
minmagnitude=min_mag)
#print(cat)
cat.plot()
print(cat.__str__(print_all=True))
net = 'AU'
stat = 'TOO'
date_start = UTCDateTime("2003-10-18T10:29:26.580000Z")
date_end = date_start + 3600
st = client.get_waveforms(net, stat, "00", "*Z",
date_start, date_end,
attach_response=True)
st.plot()
#st.write('Gippsland_low.MSEED', format='MSEED')
| gpl-3.0 |
nelson-liu/scikit-learn | sklearn/__init__.py | 28 | 3073 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.19.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
olologin/scikit-learn | sklearn/neighbors/graph.py | 14 | 6609 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than the
    radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
    'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
markhamstra/spark | python/pyspark/sql/tests.py | 2 | 232229 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import warnings
import py4j
from contextlib import contextmanager
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.util import _exception_message
_pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
_pandas_requirement_message = _exception_message(e)
_pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
_pyarrow_requirement_message = _exception_message(e)
_have_pandas = _pandas_requirement_message is None
_have_pyarrow = _pyarrow_requirement_message is None
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.sql.types import _merge_type
from pyspark.tests import QuietTest, ReusedPySparkTestCase, PySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
    This util assumes that the instance using it has a 'spark' attribute holding a
    SparkSession. It is usually used with the 'ReusedSQLTestCase' class, but it can be
    used elsewhere as long as the implementing class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
        A convenient context manager to test some configuration specific logic. It sets
        each configuration `key` to the given `value` and restores the original values
        when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
def assertPandasEqual(self, expected, result):
msg = ("DataFrames are not equal: " +
"\n\nExpected:\n%s\n%s" % (expected, expected.dtypes) +
"\n\nResult:\n%s\n%s" % (result, result.dtypes))
self.assertTrue(expected.equals(result), msg=msg)
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
class SQLTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedSQLTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
# This is to check if a deprecated 'SQLContext.registerFunction' can call its alias.
sqlContext = self.spark._wrapped
sqlContext.registerFunction("oneArg", lambda x: len(x), IntegerType())
[row] = sqlContext.sql("SELECT oneArg('test')").collect()
self.assertEqual(row[0], 4)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_udf3(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y))
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], u'5')
def test_udf_registration_return_type_none(self):
two_args = self.spark.catalog.registerFunction(
"twoArgs", UserDefinedFunction(lambda x, y: len(x) + y, "integer"), None)
self.assertEqual(two_args.deterministic, True)
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf_registration_return_type_not_none(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, "Invalid returnType"):
self.spark.catalog.registerFunction(
"f", UserDefinedFunction(lambda x, y: len(x) + y, StringType()), StringType())
def test_nondeterministic_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf
import random
udf_random_col = udf(lambda: int(100 * random.random()), IntegerType()).asNondeterministic()
self.assertEqual(udf_random_col.deterministic, False)
df = self.spark.createDataFrame([Row(1)]).select(udf_random_col().alias('RAND'))
udf_add_ten = udf(lambda rand: rand + 10, IntegerType())
[row] = df.withColumn('RAND_PLUS_TEN', udf_add_ten('RAND')).collect()
self.assertEqual(row[0] + 10, row[1])
def test_nondeterministic_udf2(self):
import random
from pyspark.sql.functions import udf
random_udf = udf(lambda: random.randint(6, 6), IntegerType()).asNondeterministic()
self.assertEqual(random_udf.deterministic, False)
random_udf1 = self.spark.catalog.registerFunction("randInt", random_udf)
self.assertEqual(random_udf1.deterministic, False)
[row] = self.spark.sql("SELECT randInt()").collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf1()).collect()
self.assertEqual(row[0], 6)
[row] = self.spark.range(1).select(random_udf()).collect()
self.assertEqual(row[0], 6)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(udf(lambda: random.randint(6, 6), IntegerType()))
pydoc.render_doc(random_udf)
pydoc.render_doc(random_udf1)
pydoc.render_doc(udf(lambda x: x).asNondeterministic)
def test_nondeterministic_udf3(self):
# regression test for SPARK-23233
from pyspark.sql.functions import udf
f = udf(lambda x: x)
# Here we cache the JVM UDF instance.
self.spark.range(1).select(f("id"))
# This should reset the cache to set the deterministic status correctly.
f = f.asNondeterministic()
# Check the deterministic status of udf.
df = self.spark.range(1).select(f("id"))
deterministic = df._jdf.logicalPlan().projectList().head().deterministic()
self.assertFalse(deterministic)
def test_nondeterministic_udf_in_aggregate(self):
from pyspark.sql.functions import udf, sum
import random
udf_random_col = udf(lambda: int(100 * random.random()), 'int').asNondeterministic()
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.groupby('id').agg(sum(udf_random_col())).collect()
with self.assertRaisesRegexp(AnalysisException, "nondeterministic"):
df.agg(sum(udf_random_col())).collect()
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
        # Also check that a UDF can be registered through the 'SQLContext.udf' alias.
sqlContext = self.spark._wrapped
add_four = sqlContext.udf.register("add_four", lambda x: x + 4, IntegerType())
self.assertListEqual(
df.selectExpr("add_four(id) AS plus_four").collect(),
df.select(add_four("id").alias("plus_four")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
        # Check that the deprecated 'SQLContext.registerJavaFunction' alias raises the same error.
sqlContext = spark._wrapped
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: sqlContext.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_linesep_text(self):
df = self.spark.read.text("python/test_support/sql/ages_newlines.csv", lineSep=",")
expected = [Row(value=u'Joe'), Row(value=u'20'), Row(value=u'"Hi'),
Row(value=u'\nI am Jeo"\nTom'), Row(value=u'30'),
Row(value=u'"My name is Tom"\nHyukjin'), Row(value=u'25'),
Row(value=u'"I am Hyukjin\n\nI love Spark!"\n')]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df.write.text(tpath, lineSep="!")
expected = [Row(value=u'Joe!20!"Hi!'), Row(value=u'I am Jeo"'),
Row(value=u'Tom!30!"My name is Tom"'),
Row(value=u'Hyukjin!25!"I am Hyukjin'),
Row(value=u''), Row(value=u'I love Spark!"'),
Row(value=u'!')]
readback = self.spark.read.text(tpath)
self.assertEqual(readback.collect(), expected)
finally:
shutil.rmtree(tpath)
def test_multiline_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_encoding_json(self):
people_array = self.spark.read\
.json("python/test_support/sql/people_array_utf16le.json",
multiLine=True, encoding="UTF-16LE")
expected = [Row(age=30, name=u'Andy'), Row(age=19, name=u'Justin')]
self.assertEqual(people_array.collect(), expected)
def test_linesep_json(self):
df = self.spark.read.json("python/test_support/sql/people.json", lineSep=",")
expected = [Row(_corrupt_record=None, name=u'Michael'),
Row(_corrupt_record=u' "age":30}\n{"name":"Justin"', name=None),
Row(_corrupt_record=u' "age":19}\n', name=None)]
self.assertEqual(df.collect(), expected)
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
df = self.spark.read.json("python/test_support/sql/people.json")
df.write.json(tpath, lineSep="!!")
readback = self.spark.read.json(tpath, lineSep="!!")
self.assertEqual(readback.collect(), df.collect())
finally:
shutil.rmtree(tpath)
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
    def test_udf_defers_judf_initialization(self):
        # This is kept separate from UDFInitializationTests to avoid initializing the
        # context when the udf is called.
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_validate_column_types(self):
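        # _to_java_column should accept column names (str/unicode) and Column objects, and
        # reject anything else with a TypeError.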
from pyspark.sql.functions import udf, to_json
from pyspark.sql.column import _to_java_column
self.assertTrue("Column" in _to_java_column("a").getClass().toString())
self.assertTrue("Column" in _to_java_column(u"a").getClass().toString())
self.assertTrue("Column" in _to_java_column(self.spark.range(1).id).getClass().toString())
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: _to_java_column(1))
class A():
pass
self.assertRaises(TypeError, lambda: _to_java_column(A()))
self.assertRaises(TypeError, lambda: _to_java_column([]))
self.assertRaisesRegexp(
TypeError,
"Invalid argument, not a string or column",
lambda: udf(lambda x: x)(None))
self.assertRaises(TypeError, lambda: to_json(1))
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
        # cache and persist
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
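        # The same explicit schema should be applicable to dicts and Row objects, both with
        # and without runtime schema verification (verifySchema).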
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
        flatten_udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
        gd.select(flatten_udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
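        # myudf implicitly returns None for x == 0, exercising null handling for UDT results.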
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_nonparam_udf_with_aggregate(self):
import pyspark.sql.functions as f
df = self.spark.createDataFrame([(1, 2), (1, 2)])
f_udf = f.udf(lambda: "const_str")
rows = df.distinct().withColumn("a", f_udf()).collect()
self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_column_operators(self):
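        # Arithmetic, comparison, boolean and string operators on Column objects should all
        # return Column objects rather than evaluating eagerly.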
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
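        # a=1 and b=-2.0 each appear in half of the rows, so they should be reported as
        # frequent items at a support threshold of 0.4.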
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
for f in ["a", u"a"]:
aq = df.stat.approxQuantile(f, [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", u"b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile((u"a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr(u"a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_sampleby(self):
df = self.sc.parallelize([Row(a=i, b=(i % 3)) for i in range(10)]).toDF()
sampled = df.stat.sampleBy(u"b", fractions={0: 0.5, 1: 0.5}, seed=0)
self.assertTrue(sampled.count() == 3)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov(u"a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab(u"a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_string_functions(self):
from pyspark.sql.functions import col, lit
df = self.spark.createDataFrame([['nick']], schema=['name'])
self.assertRaisesRegexp(
TypeError,
"must be the same type",
lambda: df.select(col('name').substr(0, lit(1))))
if sys.version_info.major == 2:
self.assertRaises(
TypeError,
lambda: df.select(col('name').substr(long(0), long(1))))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument can be implicitly cast to the array's element type.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        df.write.save(format="json", mode="overwrite", path=tmpPath,
                      noUse="this option will not be used in save.")
        actual = self.spark.read.load(format="json", path=tmpPath,
                                      noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
        df.write.mode("overwrite").options(noUse="this option will not be used in save.")\
            .option("noUse", "this option will not be used in save.")\
            .format("json").save(path=tmpPath)
        actual =\
            self.spark.read.format("json")\
            .load(path=tmpPath, noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
        try:
            df.writeStream.trigger()
            self.fail("Should have thrown an exception")
        except ValueError:
            pass
        # Should not take multiple args
        try:
            df.writeStream.trigger(once=True, processingTime='5 seconds')
            self.fail("Should have thrown an exception")
        except ValueError:
            pass
        # Should not take multiple args
        try:
            df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
            self.fail("Should have thrown an exception")
        except ValueError:
            pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
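        # Options passed directly to load() should override the path, format and schema that
        # were previously set on the reader (hence the deliberately wrong values set first).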
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
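        # Keyword arguments passed to start() should override the conflicting writer options,
        # so the 'fake' checkpoint and output locations must never be created.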
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
        # Use "sleep_udf" to delay the progress update so that `lastProgress` can be checked
        # before any progress update has been reported.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
            # "lastProgress" will usually return None here, but we don't assert that because it
            # can be flaky when Jenkins is very slow. If something is broken, "lastProgress" is
            # likely to throw an error anyway, so broken code should still be detected.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
                self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
                self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
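        # Python ints are inferred as LongType regardless of magnitude, and the values should
        # survive a Parquet round trip.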
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # Saving this data as Parquet has also caused issues in the past, so round-trip it.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_merge_type(self):
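        # _merge_type reconciles two inferred types during schema inference; incompatible
        # types should raise a TypeError naming the offending field.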
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_dayofweek(self):
from pyspark.sql.functions import dayofweek
dt = datetime.datetime(2017, 11, 6)
df = self.spark.createDataFrame([Row(date=dt)])
row = df.select(dayofweek(df.date)).first()
self.assertEqual(row[0], 2)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
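# Bitwise column operations should agree with Python's &, |, ^ and ~ on the same integers.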
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
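# functions.expr should parse a SQL expression string into a Column.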
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange("name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified: one column is replaced, another column not in the subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not a list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegexp(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
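# DataFrame.sample should reject invalid argument types and negative fractions.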
def test_sample(self):
self.assertRaisesRegexp(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
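# Exercise runtime conf set/get/unset, including defaultValue handling in spark.conf.get.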
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
self.assertEqual(spark.conf.get("hyukjin", None), None)
# This returns 'STATIC' because it's the default value of
# 'spark.sql.sources.partitionOverwriteMode', and `defaultValue` in
# `spark.conf.get` is unset.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode"), "STATIC")
# This returns None because 'spark.sql.sources.partitionOverwriteMode' is unset, but
# `defaultValue` in `spark.conf.get` is set to None.
self.assertEqual(spark.conf.get("spark.sql.sources.partitionOverwriteMode", None), None)
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
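# cacheTable/uncacheTable/clearCache should be reflected by catalog.isCached;
# unknown tables raise AnalysisException.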
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite versions <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808).
# The empty bytearray is a test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as Python's type. This is necessary because Python's array
# types depend on the C implementation on the machine, so there is no
# machine-independent correspondence between Python's array types and
# Scala types.
# See: https://docs.python.org/2/library/array.html
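# Helper: round-trip a one-element array.array of the given typecode through a
# DataFrame and check that the value is preserved.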
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in Python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in Python 4, and "c" is not supported in Python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types changes with the implementation, so we need to make
# sure that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# The keys of _array_type_mappings form a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
# `array.typecodes` is not available in Python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
self.assertEquals(types[4], np.object) # datetime.date
self.assertEquals(types[5], 'datetime64[ns]')
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.float64)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test that createDataFrame with a schema accepts a pandas DataFrame as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(_have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not _have_pandas, _pandas_requirement_message)
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_sort_with_nulls_order(self):
from pyspark.sql import functions
df = self.spark.createDataFrame(
[('Tom', 80), (None, 60), ('Alice', 50)], ["name", "height"])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')])
self.assertEquals(
df.select(df.name).orderBy(functions.asc_nulls_last('name')).collect(),
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_first('name')).collect(),
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')])
self.assertEquals(
df.select(df.name).orderBy(functions.desc_nulls_last('name')).collect(),
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)])
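# With samplingRatio=0.5, the single record containing 0.1 is expected to be skipped
# during schema inference, so column 'a' is inferred as long rather than double.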
def test_json_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '{"a":0.1}' if x == 1 else '{"a":%s}' % str(x))
schema = self.spark.read.option('inferSchema', True) \
.option('samplingRatio', 0.5) \
.json(rdd).schema
self.assertEquals(schema, StructType([StructField("a", LongType(), True)]))
def test_csv_sampling_ratio(self):
rdd = self.spark.sparkContext.range(0, 100, 1, 1) \
.map(lambda x: '0.1' if x == 1 else str(x))
schema = self.spark.read.option('inferSchema', True)\
.csv(rdd, samplingRatio=0.5).schema
self.assertEquals(schema, StructType([StructField("_c0", IntegerType(), True)]))
class HiveSparkSubmitTests(SparkSubmitTests):
@classmethod
def setUpClass(cls):
# get a SparkContext to check for availability of Hive
sc = SparkContext('local[4]', cls.__name__)
cls.hive_available = True
try:
sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
finally:
# we don't need this SparkContext for the test
sc.stop()
def setUp(self):
super(HiveSparkSubmitTests, self).setUp()
if not self.hive_available:
self.skipTest("Hive is not available.")
def test_hivecontext(self):
# This test checks that HiveContext is using the Hive metastore (SPARK-16224).
# It sets a metastore URL and checks if there is a Derby dir created by
# the Hive metastore. If this Derby dir exists, HiveContext is using the
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedSQLTestCase):
# We can't include this test in SQLTests because it stops the class's SparkContext,
# which would cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. It can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
class SparkSessionTests(PySparkTestCase):
# This test is separate because it's closely related to the session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume the default session is already set in the JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
# The session should be the same as the existing one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
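# Each expected tuple is (value, key, max, min, count, row_number, rank, dense_rank, ntile).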
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
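# Helper: run f() in its own job group and assert that it triggered exactly one job
# with one stage and a single task.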
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
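# Reload pyspark.sql.window after patching sys.maxsize so that any module-level
# constants derived from sys.maxsize are recomputed.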
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with warnings.catch_warnings(record=True) as warns:
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
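# Helper: call toPandas() once with Arrow explicitly disabled and once with the
# class-level Arrow-enabled configuration, for comparison.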
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
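# Helper: build a DataFrame from the same pandas DataFrame with Arrow disabled and
# with the class-level Arrow-enabled configuration.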
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
        # Correct result_la by adjusting for the 3 hour difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
wrong_schema = StructType(list(reversed(self.schema)))
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list('abcdefgh'))
self.assertEquals(df.schema.fieldNames(), list('abcdefgh'))
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple('abcdefgh'))
self.assertEquals(df.schema.fieldNames(), list('abcdefgh'))
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
        # Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be cast
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
        # Daylight saving time for Los Angeles ends in 2015 on Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class PandasUDFTests(ReusedSQLTestCase):
def test_pandas_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
udf = pandas_udf(lambda x: x, DoubleType())
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, DoubleType(), PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'double', PandasUDFType.SCALAR)
self.assertEqual(udf.returnType, DoubleType())
self.assertEqual(udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
udf = pandas_udf(lambda x: x, StructType([StructField("v", DoubleType())]),
PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double', PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, 'v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
udf = pandas_udf(lambda x: x, returnType='v double',
functionType=PandasUDFType.GROUPED_MAP)
self.assertEqual(udf.returnType, StructType([StructField("v", DoubleType())]))
self.assertEqual(udf.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_pandas_udf_decorator(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StructType, StructField, DoubleType
@pandas_udf(DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=DoubleType())
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
schema = StructType([StructField("v", DoubleType())])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf('v double', PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
@pandas_udf(returnType='double', functionType=PandasUDFType.SCALAR)
def foo(x):
return x
self.assertEqual(foo.returnType, DoubleType())
self.assertEqual(foo.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
def foo(x):
return x
self.assertEqual(foo.returnType, schema)
self.assertEqual(foo.evalType, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF)
def test_udf_wrong_arg(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaises(ParseException):
@pandas_udf('blah')
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid returnType.*None'):
@pandas_udf(functionType=PandasUDFType.SCALAR)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, 'Invalid functionType'):
@pandas_udf('double', 100)
def foo(x):
return x
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
pandas_udf(lambda: 1, LongType(), PandasUDFType.SCALAR)
with self.assertRaisesRegexp(ValueError, '0-arg pandas_udfs.*not.*supported'):
@pandas_udf(LongType(), PandasUDFType.SCALAR)
def zero_with_type():
return 1
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(TypeError, 'Invalid returnType'):
@pandas_udf(returnType='double', functionType=PandasUDFType.GROUPED_MAP)
def foo(df):
return df
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
@pandas_udf(returnType='k int, v double', functionType=PandasUDFType.GROUPED_MAP)
def foo(k, v, w):
return k
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
from pyspark.sql.functions import pandas_udf
@pandas_udf('double')
def random_udf(v):
import pandas as pd
import numpy as np
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf, col, array
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
from pyspark.sql.functions import pandas_udf
from pyspark.rdd import PythonEvalType
import random
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
from pyspark.sql.functions import pandas_udf, col
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
from pyspark.sql.functions import pandas_udf, col
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
from pyspark.sql.functions import pandas_udf, col
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
from decimal import Decimal
from pyspark.sql.functions import pandas_udf, col
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
from pyspark.sql.functions import pandas_udf, col
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
from pyspark.sql.functions import pandas_udf, col
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_complex(self):
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_mix_udf(self):
from pyspark.sql.functions import pandas_udf, udf, col
df = self.spark.range(10)
row_by_row_udf = udf(lambda x: x, LongType())
pd_udf = pandas_udf(lambda x: x, LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Can not mix vectorized and non-vectorized UDFs'):
df.select(row_by_row_udf(col('id')), pd_udf(col('id'))).collect()
def test_vectorized_udf_chained(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_varargs(self):
from pyspark.sql.functions import pandas_udf, col
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
from pyspark.sql.functions import pandas_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*BinaryType'):
pandas_udf(lambda x: x, BinaryType())
def test_vectorized_udf_dates(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import date
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
import pandas as pd
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
import pandas as pd
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
from pyspark.sql.functions import pandas_udf, col
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
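                # Each Arrow batch arrives in the UDF as one pandas Series, so every output row
                # carries the size of the batch it came from; with maxRecordsPerBatch=3 this is <= 3.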
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
from pyspark.sql.functions import pandas_udf, col
from datetime import datetime
import pandas as pd
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
            # Correct result_la by adjusting for the 3 hour difference between Los Angeles and
            # New York (the adjustment below is in nanoseconds, matching pd.Timestamp.value)
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
from pyspark.sql.functions import udf, pandas_udf, col
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
from pyspark.sql.functions import pandas_udf, sum
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import pandas_udf, col, expr
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf
        # Daylight saving time for Los Angeles ends in 2015 on Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
from pyspark.sql.functions import pandas_udf
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific in
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType, array, col
df = self.data.withColumn("arr", array(col("id")))
        # Different forms of grouped map pandas UDF; the results of these are the same
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType())),
StructField('v1', DoubleType()),
StructField('v2', LongType())])
udf1 = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(id=key[0], v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_register_grouped_map_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'f must be either SQL_BATCHED_UDF or '
'SQL_SCALAR_PANDAS_UDF'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_coerce(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
self.assertPandasEqual(expected, result)
def test_complex_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_empty_groupby(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_datatype_string(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_wrong_return_type(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
from pyspark.sql.functions import udf, pandas_udf, sum, PandasUDFType
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
schema = StructType(
[StructField("id", LongType(), True),
StructField("map", MapType(StringType(), IntegerType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
schema = StructType(
[StructField("id", LongType(), True),
StructField("arr_ts", ArrayType(TimestampType()), True)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*ArrayType.*TimestampType'):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
        # Daylight saving time for Los Angeles ends in 2015 on Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
self.assertPandasEqual(df.toPandas(), result.toPandas())
def test_udf_with_key(self):
from pyspark.sql.functions import pandas_udf, col, PandasUDFType
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
import numpy as np
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
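        # A minimal, hypothetical check of the dtype-promotion rules assumed above (results
        # can vary across numpy/pandas versions; run interactively to confirm), e.g.:
        #   >>> import numpy as np, pandas as pd
        #   >>> (np.int64(3) * pd.Series([1, 2], dtype='int32')).dtype
        #   >>> (pd.Series([1, 2], dtype='int64') * pd.Series([1, 2], dtype='int32')).dtype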
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id')\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected1, result1)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected2, result2)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected3, result3)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
self.assertPandasEqual(expected4, result4)
@unittest.skipIf(
not _have_pandas or not _have_pyarrow,
_pandas_requirement_message or _pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
from pyspark.sql.functions import array, explode, col, lit
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
from pyspark.sql.functions import udf
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
from pyspark.sql.functions import pandas_udf, PandasUDFType
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
from pyspark.sql.functions import pandas_udf, array
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_basic(self):
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
from pyspark.sql.types import DoubleType, MapType
from pyspark.sql.functions import pandas_udf, PandasUDFType
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
from pyspark.sql.functions import mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
from pyspark.sql.functions import sum, mean
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
from pyspark.sql.functions import sum, mean
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use a scalar pandas UDF
        # in groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
from pyspark.sql.functions import col, lit, sum, mean
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
def test_complex_groupby(self):
from pyspark.sql.functions import lit, sum
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v))
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v))
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
self.assertPandasEqual(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
from pyspark.sql.functions import col, sum
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_retain_group_columns(self):
from pyspark.sql.functions import sum, lit, col
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_invalid_args(self):
from pyspark.sql.functions import mean
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'), verbosity=2)
else:
unittest.main(verbosity=2)
| apache-2.0 |
fyffyt/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
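# Note: besides grid_scores_, the fitted selector also exposes support_ (boolean mask of the
# selected features) and ranking_ (rank 1 for selected features), so the reduced feature
# matrix can be obtained with X[:, rfecv.support_] or rfecv.transform(X).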
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
ComparativeGenomicsToolkit/Comparative-Annotation-Toolkit | cat/transmap_classify.py | 1 | 9195 | """
Classify transMap transcripts producing the TransMapEvaluation table for each genome's database
1. AlnExtendsOffContig: Does this alignment run off the end of a contig?
2. AlnPartialMap: Did this transcript not map completely?
3. AlnAbutsUnknownBases: Does this alignment have Ns immediately touching any exons?
4. PercentN: Percent of bases aligned to Ns
5. TransMapCoverage
6. TransMapIdentity
7. TransMapGoodness
8. TransMapOriginalIntronsPercent: The percent of transMap introns within a wiggle distance of an intron in the parent
transcript in transcript coordinates.
9. Synteny: Count of the # of genes that match the reference in both directions (+/- 5 genes)
10. ValidStart -- start with ATG?
11. ValidStop -- valid stop codon (in frame)?
12. ProperOrf -- is the orf a multiple of 3?
"""
import bisect
import collections
import pandas as pd
import tools.bio
import tools.nameConversions
import tools.psl
import tools.dataOps
import tools.fileOps
import tools.transcripts
import tools.toilInterface
import tools.procOps
import tools.tm2hints
import tools.mathOps
def transmap_classify(tm_eval_args):
"""
Wrapper function that runs alignment classification based on transMap PSLs, genePreds and the genome FASTA.
:param tm_eval_args: argparse Namespace produced by EvaluateTransMap.get_args()
:return: DataFrame
"""
psl_dict = tools.psl.get_alignment_dict(tm_eval_args.filtered_tm_psl)
ref_psl_dict = tools.psl.get_alignment_dict(tm_eval_args.ref_psl)
gp_dict = tools.transcripts.get_gene_pred_dict(tm_eval_args.filtered_tm_gp)
ref_gp_dict = tools.transcripts.get_gene_pred_dict(tm_eval_args.annotation_gp)
fasta = tools.bio.get_sequence_dict(tm_eval_args.fasta)
synteny_scores = synteny(ref_gp_dict, gp_dict)
r = []
for aln_id, tx in gp_dict.items():
aln = psl_dict[aln_id]
tx_id = tools.nameConversions.strip_alignment_numbers(aln_id)
ref_aln = ref_psl_dict[tx_id]
gene_id = ref_gp_dict[tx_id].name2
r.append([aln_id, tx_id, gene_id, 'AlnExtendsOffContig', aln_extends_off_contig(aln)])
r.append([aln_id, tx_id, gene_id, 'AlnPartialMap', alignment_partial_map(aln)])
r.append([aln_id, tx_id, gene_id, 'AlnAbutsUnknownBases', aln_abuts_unknown_bases(tx, fasta)])
r.append([aln_id, tx_id, gene_id, 'PercentN', aln.percent_n])
r.append([aln_id, tx_id, gene_id, 'TransMapCoverage', 100 * aln.coverage])
r.append([aln_id, tx_id, gene_id, 'TransMapIdentity', 100 * aln.identity])
r.append([aln_id, tx_id, gene_id, 'TransMapGoodness', 100 * (1 - aln.badness)])
r.append([aln_id, tx_id, gene_id, 'TransMapOriginalIntronsPercent', percent_original_introns(aln, tx, ref_aln)])
r.append([aln_id, tx_id, gene_id, 'Synteny', synteny_scores[aln_id]])
r.append([aln_id, tx_id, gene_id, 'ValidStart', tools.transcripts.has_start_codon(fasta, tx)])
r.append([aln_id, tx_id, gene_id, 'ValidStop', tools.transcripts.has_stop_codon(fasta, tx)])
r.append([aln_id, tx_id, gene_id, 'ProperOrf', tx.cds_size % 3 == 0])
df = pd.DataFrame(r, columns=['AlignmentId', 'TranscriptId', 'GeneId', 'classifier', 'value'])
df.value = pd.to_numeric(df.value)
return df.set_index(['GeneId', 'TranscriptId', 'AlignmentId', 'classifier'])
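# Illustrative sketch of the long-format output produced above (the IDs and
# values below are hypothetical placeholders, not taken from a real run):
#   GeneId  TranscriptId  AlignmentId  classifier        value
#   ENSG01  ENST01        ENST01-1     TransMapCoverage  98.7
#   ENSG01  ENST01        ENST01-1     Synteny            4.0
# Downstream consumers can pivot on 'classifier' to get one column per metric.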
###
# Classifiers
###
def aln_extends_off_contig(aln):
"""
Does the alignment extend off of a contig or scaffold?
aligned: # unaligned: - whatever: . edge: |
query |---#####....
target |#####....
OR
aligned: # unaligned: - whatever: . edge: |
query ...######---|
target ...######|
:param aln: PslRow object
:return: boolean
"""
if aln.t_start == 0 and aln.q_start != 0 or aln.t_end == aln.t_size and aln.q_end != aln.q_size:
return True
else:
return False
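# Worked example with hypothetical PSL coordinates (for illustration only):
# t_start=0, t_end=480, t_size=10000 with q_start=20, q_end=500, q_size=500
# means the target hit the contig edge while query bases were left unaligned,
# so aln_extends_off_contig returns True; with t_start=20 and q_start=0 the
# same alignment returns False because neither edge condition is met.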
def alignment_partial_map(aln):
"""
Does the query sequence not map entirely?
a.q_size != a.q_end - a.q_start
:param aln: PslRow object
:return: boolean
"""
return aln.q_size != aln.q_end - aln.q_start
def aln_abuts_unknown_bases(tx, fasta):
"""
Do any exons in this alignment immediately touch Ns?
:param tx: a GenePredTranscript object
:param fasta: pyfasta Fasta object for genome
:return: boolean
"""
chrom = tx.chromosome
for exon in tx.exon_intervals:
if exon.start == 0: # we are at the edge of the contig
left_base = None
else:
left_base = fasta[chrom][exon.start - 1]
if exon.stop >= len(fasta[chrom]): # we are at the edge of the contig
right_base = None
else:
right_base = fasta[chrom][exon.stop]
if left_base == 'N' or right_base == 'N':
return True
return False
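# Sketch of the check above (hypothetical sequence, for illustration only):
# with fasta[chrom] = 'ACGTNNNNACGT' and an exon spanning [0, 4), the base at
# index 4 is 'N', so the alignment is flagged as abutting unknown bases.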
def synteny(ref_gp_dict, gp_dict):
"""
    Attempts to evaluate the synteny of these transcripts. For each transcript, compares the 5 genes up- and downstream
    in the reference genome and counts how many match the transMap results.
:param ref_gp_dict: Dictionary of GenePredTranscript objects from the reference annotation
:param gp_dict: Dictionary of GenePredTranscript objects from the transMap output
:return:
"""
def create_interval_dict(tx_dict):
"""
Creates a dict mapping chromosome sequences to gene intervals [chrom][gene_id]: [list of tx intervals]
Skips huge intervals to avoid mapping issues
"""
interval_dict = collections.defaultdict(lambda: collections.defaultdict(list))
for tx in tx_dict.values():
interval_dict[tx.chromosome][tx.name2].append(tx.interval)
return interval_dict
def merge_interval_dict(interval_dict):
"""Merges the above intervals into the one genic interval."""
merged_interval_dict = collections.defaultdict(dict)
for chrom in interval_dict:
for gene_id, gene_intervals in interval_dict[chrom].items():
merged_intervals = tools.intervals.gap_merge_intervals(gene_intervals, float('inf'))
assert len(merged_intervals) == 1
merged_interval = merged_intervals[0]
merged_interval.data = gene_id
merged_interval_dict[chrom][gene_id] = merged_interval
return merged_interval_dict
def sort_interval_dict(merged_interval_dict):
"""Sorts the dict produced by create_interval_dict so that we can do list bisection"""
sorted_interval_dict = {}
for chrom in merged_interval_dict:
sorted_interval_dict[chrom] = sorted(merged_interval_dict[chrom].values())
return sorted_interval_dict
def make_ref_interval_map(ref_intervals):
"""Creates a dictionary mapping reference intervals to their name"""
ref_interval_map = {}
for interval_list in ref_intervals.values():
for interval in interval_list:
assert interval.data not in ref_interval_map
ref_interval_map[interval.data] = interval
return ref_interval_map
# create dictionaries mapping chromosome names to all genic intervals present on the chromosome
tm_chrom_intervals = sort_interval_dict(merge_interval_dict(create_interval_dict(gp_dict)))
ref_chrom_intervals = sort_interval_dict(merge_interval_dict(create_interval_dict(ref_gp_dict)))
# convert the reference to a map that is per-name so that we know where to look
ref_interval_map = make_ref_interval_map(ref_chrom_intervals)
# synteny score algorithm
scores = {}
for tx in gp_dict.values():
# find the genes from -5 to +5 in the target genome
target_intervals = tm_chrom_intervals[tx.chromosome]
target_position = bisect.bisect_left(target_intervals, tx.interval)
target_genes = {x.data for x in target_intervals[target_position - 5: target_position + 5]}
# find the same gene list in the reference genome
ref_interval = ref_interval_map[tx.name2]
ref_intervals = ref_chrom_intervals[ref_interval.chromosome]
ref_position = bisect.bisect_left(ref_intervals, ref_interval)
reference_genes = {x.data for x in ref_intervals[ref_position - 5: ref_position + 5]}
scores[tx.name] = len(reference_genes & target_genes)
return scores
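# Hypothetical walk-through of the scoring loop above: if the five genes on
# either side of a transMapped transcript are {A, B, C, D, E} and the genes
# flanking its parent in the reference are {A, B, C, F, G}, the synteny score
# recorded for that alignment is len({A, B, C}) == 3. Gene names here are
# placeholders, not real identifiers.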
def percent_original_introns(aln, tx, ref_aln):
"""
Calculates the intron support vector, using code from tm2hints, but shrinking the fuzz distance to match the
alignment classifiers.
    Returns the percent of introns that are within the wiggle distance
:param aln: PslRow object representing the transMapped transcript
:param tx: GenePredTranscript object representing the transMapped transcript
:param ref_aln: PslRow object representing the reference transcript
:return: float between 0 and 100
"""
ref_starts = tools.tm2hints.fix_ref_q_starts(ref_aln)
c = 0
for i in tx.intron_intervals:
if tools.tm2hints.is_fuzzy_intron(i, aln, ref_starts, fuzz_distance=7):
c += 1
return 100 * tools.mathOps.format_ratio(c, len(tx.intron_intervals), resolve_nan=None)
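# Example of the value returned above (hypothetical counts): a transcript with
# 8 introns, 7 of which fall within the 7 bp fuzz distance of a parent intron,
# yields 100 * 7 / 8 = 87.5. The no-intron case is delegated to format_ratio's
# resolve_nan argument rather than handled here.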
| apache-2.0 |
pgmpy/pgmpy | pgmpy/tests/test_models/test_BayesianNetwork.py | 2 | 45449 | import unittest
import networkx as nx
import pandas as pd
import numpy as np
import numpy.testing as np_test
from pgmpy.models import BayesianNetwork, MarkovNetwork
from pgmpy.base import DAG
import pgmpy.tests.help_functions as hf
from pgmpy.factors.discrete import (
TabularCPD,
JointProbabilityDistribution,
DiscreteFactor,
)
from pgmpy.independencies import Independencies
from pgmpy.estimators import (
BayesianEstimator,
BaseEstimator,
MaximumLikelihoodEstimator,
)
from pgmpy.base import DAG
from pgmpy.utils import get_example_model
from pgmpy.sampling import BayesianModelSampling
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = BayesianNetwork()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = BayesianNetwork([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.g.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.g.edges()), [["a", "b"], ["b", "c"]]
)
def test_class_init_with_data_nonstring(self):
BayesianNetwork([(1, 2), (2, 3)])
def test_add_node_string(self):
self.G.add_node("a")
self.assertListEqual(list(self.G.nodes()), ["a"])
def test_add_node_nonstring(self):
self.G.add_node(1)
def test_add_nodes_from_string(self):
self.G.add_nodes_from(["a", "b", "c", "d"])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c", "d"])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge("d", "e")
self.assertListEqual(sorted(self.G.nodes()), ["d", "e"])
self.assertListEqual(list(self.G.edges()), [("d", "e")])
self.G.add_nodes_from(["a", "b", "c"])
self.G.add_edge("a", "b")
self.assertListEqual(
hf.recursive_sorted(self.G.edges()), [["a", "b"], ["d", "e"]]
)
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, "a", "a")
def test_add_edge_result_cycle(self):
self.G.add_edges_from([("a", "b"), ("a", "c")])
self.assertRaises(ValueError, self.G.add_edge, "c", "a")
def test_add_edges_from_string(self):
self.G.add_edges_from([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.G.edges()), [["a", "b"], ["b", "c"]]
)
self.G.add_nodes_from(["d", "e", "f"])
self.G.add_edges_from([("d", "e"), ("e", "f")])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c", "d", "e", "f"])
self.assertListEqual(
hf.recursive_sorted(self.G.edges()),
hf.recursive_sorted([("a", "b"), ("b", "c"), ("d", "e"), ("e", "f")]),
)
def test_add_edges_from_nonstring(self):
self.G.add_edges_from([(1, 2), (2, 3)])
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from, [("a", "a")])
def test_add_edges_from_result_cycle(self):
self.assertRaises(
ValueError, self.G.add_edges_from, [("a", "b"), ("b", "c"), ("c", "a")]
)
def test_update_node_parents_bm_constructor(self):
self.g = BayesianNetwork([("a", "b"), ("b", "c")])
self.assertListEqual(list(self.g.predecessors("a")), [])
self.assertListEqual(list(self.g.predecessors("b")), ["a"])
self.assertListEqual(list(self.g.predecessors("c")), ["b"])
def test_update_node_parents(self):
self.G.add_nodes_from(["a", "b", "c"])
self.G.add_edges_from([("a", "b"), ("b", "c")])
self.assertListEqual(list(self.G.predecessors("a")), [])
self.assertListEqual(list(self.G.predecessors("b")), ["a"])
self.assertListEqual(list(self.G.predecessors("c")), ["b"])
def tearDown(self):
del self.G
class TestBayesianModelMethods(unittest.TestCase):
def setUp(self):
self.G = BayesianNetwork([("a", "d"), ("b", "d"), ("d", "e"), ("b", "c")])
self.G1 = BayesianNetwork([("diff", "grade"), ("intel", "grade")])
diff_cpd = TabularCPD("diff", 2, values=[[0.2], [0.8]])
intel_cpd = TabularCPD("intel", 3, values=[[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD(
"grade",
3,
values=[
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
],
evidence=["diff", "intel"],
evidence_card=[2, 3],
)
self.G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
self.G2 = BayesianNetwork([("d", "g"), ("g", "l"), ("i", "g"), ("i", "l")])
self.G3 = BayesianNetwork(
[
("Pop", "EC"),
("Urb", "EC"),
("GDP", "EC"),
("EC", "FFEC"),
("EC", "REC"),
("EC", "EI"),
("REC", "CO2"),
("REC", "CH4"),
("REC", "N2O"),
("FFEC", "CO2"),
("FFEC", "CH4"),
("FFEC", "N2O"),
]
)
def test_moral_graph(self):
moral_graph = self.G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ["a", "b", "c", "d", "e"])
for edge in moral_graph.edges():
self.assertTrue(
edge in [("a", "b"), ("a", "d"), ("b", "c"), ("d", "b"), ("e", "d")]
or (edge[1], edge[0])
in [("a", "b"), ("a", "d"), ("b", "c"), ("d", "b"), ("e", "d")]
)
def test_moral_graph_with_edge_present_over_parents(self):
G = BayesianNetwork(
[("a", "d"), ("d", "e"), ("b", "d"), ("b", "c"), ("a", "b")]
)
moral_graph = G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ["a", "b", "c", "d", "e"])
for edge in moral_graph.edges():
self.assertTrue(
edge in [("a", "b"), ("c", "b"), ("d", "a"), ("d", "b"), ("d", "e")]
or (edge[1], edge[0])
in [("a", "b"), ("c", "b"), ("d", "a"), ("d", "b"), ("d", "e")]
)
def test_get_ancestors_of_success(self):
        ancestors1 = self.G2._get_ancestors_of("g")
        ancestors2 = self.G2._get_ancestors_of("d")
        ancestors3 = self.G2._get_ancestors_of(["i", "l"])
        self.assertEqual(ancestors1, {"d", "i", "g"})
        self.assertEqual(ancestors2, {"d"})
        self.assertEqual(ancestors3, {"g", "i", "l", "d"})
def test_get_ancestors_of_failure(self):
self.assertRaises(ValueError, self.G2._get_ancestors_of, "h")
def test_get_cardinality(self):
self.assertDictEqual(
self.G1.get_cardinality(), {"diff": 2, "intel": 3, "grade": 3}
)
def test_get_cardinality_with_node(self):
self.assertEqual(self.G1.get_cardinality("diff"), 2)
self.assertEqual(self.G1.get_cardinality("intel"), 3)
self.assertEqual(self.G1.get_cardinality("grade"), 3)
def test_local_independencies(self):
self.assertEqual(
self.G.local_independencies("a"), Independencies(["a", ["b", "c"]])
)
self.assertEqual(
self.G.local_independencies("c"),
Independencies(["c", ["a", "d", "e"], "b"]),
)
self.assertEqual(
self.G.local_independencies("d"), Independencies(["d", "c", ["b", "a"]])
)
self.assertEqual(
self.G.local_independencies("e"),
Independencies(["e", ["c", "b", "a"], "d"]),
)
self.assertEqual(self.G.local_independencies("b"), Independencies(["b", "a"]))
self.assertEqual(self.G1.local_independencies("grade"), Independencies())
def test_get_independencies(self):
chain = BayesianNetwork([("X", "Y"), ("Y", "Z")])
self.assertEqual(
chain.get_independencies(), Independencies(("X", "Z", "Y"), ("Z", "X", "Y"))
)
fork = BayesianNetwork([("Y", "X"), ("Y", "Z")])
self.assertEqual(
fork.get_independencies(), Independencies(("X", "Z", "Y"), ("Z", "X", "Y"))
)
collider = BayesianNetwork([("X", "Y"), ("Z", "Y")])
self.assertEqual(
collider.get_independencies(), Independencies(("X", "Z"), ("Z", "X"))
)
# Latent variables
fork = BayesianNetwork([("Y", "X"), ("Y", "Z")], latents=["Y"])
self.assertEqual(
fork.get_independencies(include_latents=True),
Independencies(("X", "Z", "Y"), ("Z", "X", "Y")),
)
self.assertEqual(
fork.get_independencies(include_latents=False), Independencies()
)
def test_is_imap(self):
val = [
0.01,
0.01,
0.08,
0.006,
0.006,
0.048,
0.004,
0.004,
0.032,
0.04,
0.04,
0.32,
0.024,
0.024,
0.192,
0.016,
0.016,
0.128,
]
JPD = JointProbabilityDistribution(["diff", "intel", "grade"], [2, 3, 3], val)
fac = DiscreteFactor(["diff", "intel", "grade"], [2, 3, 3], val)
self.assertTrue(self.G1.is_imap(JPD))
self.assertRaises(TypeError, self.G1.is_imap, fac)
def test_markov_blanet(self):
G = DAG(
[
("x", "y"),
("z", "y"),
("y", "w"),
("y", "v"),
("u", "w"),
("s", "v"),
("w", "t"),
("w", "m"),
("v", "n"),
("v", "q"),
]
)
self.assertEqual(
set(G.get_markov_blanket("y")), set(["s", "w", "x", "u", "z", "v"])
)
def test_markov_blanket_G3(self):
self.assertEqual(set(self.G3.get_markov_blanket("CH4")), set(["FFEC", "REC"]))
def test_get_immoralities(self):
G = BayesianNetwork([("x", "y"), ("z", "y"), ("x", "z"), ("w", "y")])
self.assertEqual(G.get_immoralities(), {("w", "x"), ("w", "z")})
G1 = BayesianNetwork([("x", "y"), ("z", "y"), ("z", "x"), ("w", "y")])
self.assertEqual(G1.get_immoralities(), {("w", "x"), ("w", "z")})
G2 = BayesianNetwork(
[("x", "y"), ("z", "y"), ("x", "z"), ("w", "y"), ("w", "x")]
)
self.assertEqual(G2.get_immoralities(), {("w", "z")})
def test_is_iequivalent(self):
G = BayesianNetwork([("x", "y"), ("z", "y"), ("x", "z"), ("w", "y")])
self.assertRaises(TypeError, G.is_iequivalent, MarkovNetwork())
G1 = BayesianNetwork([("V", "W"), ("W", "X"), ("X", "Y"), ("Z", "Y")])
G2 = BayesianNetwork([("W", "V"), ("X", "W"), ("X", "Y"), ("Z", "Y")])
self.assertTrue(G1.is_iequivalent(G2))
G3 = BayesianNetwork([("W", "V"), ("W", "X"), ("Y", "X"), ("Z", "Y")])
self.assertFalse(G3.is_iequivalent(G2))
def test_copy(self):
model_copy = self.G1.copy()
self.assertEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))
self.assertEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))
self.assertNotEqual(
id(self.G1.get_cpds("diff")), id(model_copy.get_cpds("diff"))
)
self.G1.remove_cpds("diff")
diff_cpd = TabularCPD("diff", 2, values=[[0.3], [0.7]])
self.G1.add_cpds(diff_cpd)
self.assertNotEqual(self.G1.get_cpds("diff"), model_copy.get_cpds("diff"))
self.G1.remove_node("intel")
self.assertNotEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))
self.assertNotEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))
def test_get_random(self):
model = BayesianNetwork.get_random(n_nodes=5, edge_prob=0.5)
self.assertEqual(len(model.nodes()), 5)
self.assertEqual(len(model.cpds), 5)
for cpd in model.cpds:
self.assertTrue(np.allclose(np.sum(cpd.get_values(), axis=0), 1, atol=0.01))
model = BayesianNetwork.get_random(n_nodes=5, edge_prob=0.6, n_states=5)
self.assertEqual(len(model.nodes()), 5)
self.assertEqual(len(model.cpds), 5)
for cpd in model.cpds:
self.assertTrue(np.allclose(np.sum(cpd.get_values(), axis=0), 1, atol=0.01))
model = BayesianNetwork.get_random(
n_nodes=5, edge_prob=0.6, n_states=range(2, 7)
)
self.assertEqual(len(model.nodes()), 5)
self.assertEqual(len(model.cpds), 5)
for cpd in model.cpds:
self.assertTrue(np.allclose(np.sum(cpd.get_values(), axis=0), 1, atol=0.01))
def test_remove_node(self):
self.G1.remove_node("diff")
self.assertEqual(sorted(self.G1.nodes()), sorted(["grade", "intel"]))
self.assertRaises(ValueError, self.G1.get_cpds, "diff")
def test_remove_nodes_from(self):
self.G1.remove_nodes_from(["diff", "grade"])
self.assertEqual(sorted(self.G1.nodes()), sorted(["intel"]))
self.assertRaises(ValueError, self.G1.get_cpds, "diff")
self.assertRaises(ValueError, self.G1.get_cpds, "grade")
def tearDown(self):
del self.G
del self.G1
class TestBayesianModelCPD(unittest.TestCase):
def setUp(self):
self.G = BayesianNetwork([("d", "g"), ("i", "g"), ("g", "l"), ("i", "s")])
self.G2 = DAG([("d", "g"), ("i", "g"), ("g", "l"), ("i", "s")])
self.G_latent = DAG(
[("d", "g"), ("i", "g"), ("g", "l"), ("i", "s")], latents=["d", "g"]
)
def test_active_trail_nodes(self):
self.assertEqual(sorted(self.G2.active_trail_nodes("d")["d"]), ["d", "g", "l"])
self.assertEqual(
sorted(self.G2.active_trail_nodes("i")["i"]), ["g", "i", "l", "s"]
)
self.assertEqual(
sorted(self.G2.active_trail_nodes(["d", "i"])["d"]), ["d", "g", "l"]
)
# For model with latent variables
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("d", include_latents=True)["d"]),
["d", "g", "l"],
)
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("i", include_latents=True)["i"]),
["g", "i", "l", "s"],
)
self.assertEqual(
sorted(
self.G_latent.active_trail_nodes(["d", "i"], include_latents=True)["d"]
),
["d", "g", "l"],
)
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("d", include_latents=False)["d"]),
["l"],
)
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("i", include_latents=False)["i"]),
["i", "l", "s"],
)
self.assertEqual(
sorted(
self.G_latent.active_trail_nodes(["d", "i"], include_latents=False)["d"]
),
["l"],
)
def test_active_trail_nodes_args(self):
self.assertEqual(
sorted(self.G2.active_trail_nodes(["d", "l"], observed="g")["d"]),
["d", "i", "s"],
)
self.assertEqual(
sorted(self.G2.active_trail_nodes(["d", "l"], observed="g")["l"]), ["l"]
)
self.assertEqual(
sorted(self.G2.active_trail_nodes("s", observed=["i", "l"])["s"]), ["s"]
)
self.assertEqual(
sorted(self.G2.active_trail_nodes("s", observed=["d", "l"])["s"]),
["g", "i", "s"],
)
def test_is_dconnected_triplets(self):
self.assertTrue(self.G.is_dconnected("d", "l"))
self.assertTrue(self.G.is_dconnected("g", "s"))
self.assertFalse(self.G.is_dconnected("d", "i"))
self.assertTrue(self.G.is_dconnected("d", "i", observed="g"))
self.assertFalse(self.G.is_dconnected("d", "l", observed="g"))
self.assertFalse(self.G.is_dconnected("i", "l", observed="g"))
self.assertTrue(self.G.is_dconnected("d", "i", observed="l"))
self.assertFalse(self.G.is_dconnected("g", "s", observed="i"))
def test_is_dconnected(self):
self.assertFalse(self.G.is_dconnected("d", "s"))
self.assertTrue(self.G.is_dconnected("s", "l"))
self.assertTrue(self.G.is_dconnected("d", "s", observed="g"))
self.assertFalse(self.G.is_dconnected("s", "l", observed="g"))
def test_is_dconnected_args(self):
self.assertFalse(self.G.is_dconnected("s", "l", "i"))
self.assertFalse(self.G.is_dconnected("s", "l", "g"))
self.assertTrue(self.G.is_dconnected("d", "s", "l"))
self.assertFalse(self.G.is_dconnected("d", "s", ["i", "l"]))
def test_get_cpds(self):
cpd_d = TabularCPD("d", 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD("i", 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD(
"g",
2,
values=np.random.rand(2, 4),
evidence=["d", "i"],
evidence_card=[2, 2],
)
cpd_l = TabularCPD(
"l", 2, values=np.random.rand(2, 2), evidence=["g"], evidence_card=[2]
)
cpd_s = TabularCPD(
"s", 2, values=np.random.rand(2, 2), evidence=["i"], evidence_card=[2]
)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds("d").variable, "d")
def test_get_cpds1(self):
self.model = BayesianNetwork([("A", "AB")])
cpd_a = TabularCPD("A", 2, values=np.random.rand(2, 1))
cpd_ab = TabularCPD(
"AB", 2, values=np.random.rand(2, 2), evidence=["A"], evidence_card=[2]
)
self.model.add_cpds(cpd_a, cpd_ab)
self.assertEqual(self.model.get_cpds("A").variable, "A")
self.assertEqual(self.model.get_cpds("AB").variable, "AB")
self.assertRaises(ValueError, self.model.get_cpds, "B")
self.model.add_node("B")
self.assertIsNone(self.model.get_cpds("B"))
def test_add_single_cpd(self):
cpd_s = TabularCPD("s", 2, np.random.rand(2, 2), ["i"], [2])
self.G.add_cpds(cpd_s)
self.assertListEqual(self.G.get_cpds(), [cpd_s])
def test_add_multiple_cpds(self):
cpd_d = TabularCPD("d", 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD("i", 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD(
"g",
2,
values=np.random.rand(2, 4),
evidence=["d", "i"],
evidence_card=[2, 2],
)
cpd_l = TabularCPD(
"l", 2, values=np.random.rand(2, 2), evidence=["g"], evidence_card=[2]
)
cpd_s = TabularCPD(
"s", 2, values=np.random.rand(2, 2), evidence=["i"], evidence_card=[2]
)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds("d"), cpd_d)
self.assertEqual(self.G.get_cpds("i"), cpd_i)
self.assertEqual(self.G.get_cpds("g"), cpd_g)
self.assertEqual(self.G.get_cpds("l"), cpd_l)
self.assertEqual(self.G.get_cpds("s"), cpd_s)
def test_check_model(self):
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),
evidence=["d", "i"],
evidence_card=[2, 2],
)
cpd_s = TabularCPD(
"s",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["i"],
evidence_card=[2],
)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["g"],
evidence_card=[2],
)
self.G.add_cpds(cpd_g, cpd_s, cpd_l)
self.assertRaises(ValueError, self.G.check_model)
cpd_d = TabularCPD("d", 2, values=[[0.8], [0.2]])
cpd_i = TabularCPD("i", 2, values=[[0.7], [0.3]])
self.G.add_cpds(cpd_d, cpd_i)
self.assertTrue(self.G.check_model())
def test_check_model1(self):
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["i"],
evidence_card=[2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),
evidence=["d", "s"],
evidence_card=[2, 2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["l"],
evidence_card=[2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["d"],
evidence_card=[2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),
evidence=["d", "i"],
evidence_card=[2, 2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD(
"l",
2,
values=np.array(
[
[0.2, 0.3, 0.4, 0.6, 0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4, 0.8, 0.7, 0.6, 0.4],
]
),
evidence=["g", "d", "i"],
evidence_card=[2, 2, 2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def test_check_model2(self):
cpd_s = TabularCPD(
"s",
2,
values=np.array([[0.5, 0.3], [0.8, 0.7]]),
evidence=["i"],
evidence_card=[2],
)
self.G.add_cpds(cpd_s)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_s)
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.3, 0.7, 0.6, 0.4]]),
evidence=["d", "i"],
evidence_card=[2, 2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3], [0.1, 0.7]]),
evidence=["g"],
evidence_card=[2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def tearDown(self):
del self.G
class TestBayesianModelFitPredict(unittest.TestCase):
def setUp(self):
self.model_disconnected = BayesianNetwork()
self.model_disconnected.add_nodes_from(["A", "B", "C", "D", "E"])
self.model_connected = BayesianNetwork(
[("A", "B"), ("C", "B"), ("C", "D"), ("B", "E")]
)
self.model2 = BayesianNetwork([("A", "C"), ("B", "C")])
self.data1 = pd.DataFrame(data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0]})
self.data2 = pd.DataFrame(
data={
"A": [0, np.NaN, 1],
"B": [0, 1, 0],
"C": [1, 1, np.NaN],
"D": [np.NaN, "Y", np.NaN],
}
)
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv", dtype=str
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_bayesian_fit(self):
print(isinstance(BayesianEstimator, BaseEstimator))
print(isinstance(MaximumLikelihoodEstimator, BaseEstimator))
self.model2.fit(
self.data1,
estimator=BayesianEstimator,
prior_type="dirichlet",
pseudo_counts={
"A": [[9], [3]],
"B": [[9], [3]],
"C": [[9, 9, 9, 9], [3, 3, 3, 3]],
},
)
self.assertEqual(
self.model2.get_cpds("B"), TabularCPD("B", 2, [[11.0 / 15], [4.0 / 15]])
)
def test_fit_update(self):
model = get_example_model("asia")
model_copy = model.copy()
data = BayesianModelSampling(model).forward_sample(int(1e3))
model.fit_update(data, n_prev_samples=int(1e3))
for var in model.nodes():
self.assertTrue(
model_copy.get_cpds(var).__eq__(model.get_cpds(var), atol=0.1)
)
model = model_copy.copy()
model.fit_update(data)
for var in model.nodes():
self.assertTrue(
model_copy.get_cpds(var).__eq__(model.get_cpds(var), atol=0.1)
)
def test_fit_missing_data(self):
self.model2.fit(
self.data2, state_names={"C": [0, 1]}, complete_samples_only=False
)
cpds = set(
[
TabularCPD("A", 2, [[0.5], [0.5]]),
TabularCPD("B", 2, [[2.0 / 3], [1.0 / 3]]),
TabularCPD(
"C",
2,
[[0, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 0.5]],
evidence=["A", "B"],
evidence_card=[2, 2],
),
]
)
self.assertSetEqual(cpds, set(self.model2.get_cpds()))
def test_disconnected_fit(self):
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1000, 5)),
columns=["A", "B", "C", "D", "E"],
)
self.model_disconnected.fit(values)
for node in ["A", "B", "C", "D", "E"]:
cpd = self.model_disconnected.get_cpds(node)
self.assertEqual(cpd.variable, node)
np_test.assert_array_equal(cpd.cardinality, np.array([2]))
value = (
values.loc[:, node].value_counts()
/ values.loc[:, node].value_counts().sum()
)
value = value.reindex(sorted(value.index)).values
np_test.assert_array_equal(cpd.values, value)
def test_predict(self):
titanic = BayesianNetwork()
titanic.add_edges_from([("Sex", "Survived"), ("Pclass", "Survived")])
titanic.fit(self.titanic_data2[500:])
p1 = titanic.predict(self.titanic_data2[["Sex", "Pclass"]][:30])
p2 = titanic.predict(self.titanic_data2[["Survived", "Pclass"]][:30])
p3 = titanic.predict(self.titanic_data2[["Survived", "Sex"]][:30])
p1_res = np.array(
[
"0",
"1",
"0",
"1",
"0",
"0",
"0",
"0",
"0",
"1",
"0",
"1",
"0",
"0",
"0",
"1",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
]
)
p2_res = np.array(
[
"male",
"female",
"female",
"female",
"male",
"male",
"male",
"male",
"female",
"female",
"female",
"female",
"male",
"male",
"male",
"female",
"male",
"female",
"male",
"female",
"male",
"female",
"female",
"female",
"male",
"female",
"male",
"male",
"female",
"male",
]
)
p3_res = np.array(
[
"3",
"1",
"1",
"1",
"3",
"3",
"3",
"3",
"1",
"1",
"1",
"1",
"3",
"3",
"3",
"1",
"3",
"1",
"3",
"1",
"3",
"1",
"1",
"1",
"3",
"1",
"3",
"3",
"1",
"3",
]
)
np_test.assert_array_equal(p1.values.ravel(), p1_res)
np_test.assert_array_equal(p2.values.ravel(), p2_res)
np_test.assert_array_equal(p3.values.ravel(), p3_res)
def test_predict_stochastic(self):
titanic = BayesianNetwork()
titanic.add_edges_from([("Sex", "Survived"), ("Pclass", "Survived")])
titanic.fit(self.titanic_data2[500:])
p1 = titanic.predict(
self.titanic_data2[["Sex", "Pclass"]][:30], stochastic=True
)
p2 = titanic.predict(
self.titanic_data2[["Survived", "Pclass"]][:30], stochastic=True
)
p3 = titanic.predict(
self.titanic_data2[["Survived", "Sex"]][:30], stochastic=True
)
# Acceptable range between 15 - 20.
# TODO: Is there a better way to test this?
self.assertTrue(p1.value_counts().values[0] <= 23)
self.assertTrue(p1.value_counts().values[0] >= 15)
self.assertTrue(p2.value_counts().values[0] <= 22)
self.assertTrue(p2.value_counts().values[0] >= 15)
self.assertTrue(p3.value_counts().values[0] <= 19)
self.assertTrue(p3.value_counts().values[0] >= 8)
def test_connected_predict(self):
np.random.seed(42)
values = pd.DataFrame(
np.array(np.random.randint(low=0, high=2, size=(1000, 5)), dtype=str),
columns=["A", "B", "C", "D", "E"],
)
fit_data = values[:800]
predict_data = values[800:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict, predict_data)
predict_data.drop("E", axis=1, inplace=True)
e_predict = self.model_connected.predict(predict_data)
np_test.assert_array_equal(
e_predict.values.ravel(),
np.array(
[
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
0,
1,
0,
1,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
],
dtype=str,
),
)
def test_connected_predict_probability(self):
np.random.seed(42)
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(100, 5)),
columns=["A", "B", "C", "D", "E"],
)
fit_data = values[:80]
predict_data = values[80:].copy()
self.model_connected.fit(fit_data)
predict_data.drop("E", axis=1, inplace=True)
e_prob = self.model_connected.predict_probability(predict_data)
np_test.assert_allclose(
e_prob.values.ravel(),
np.array(
[
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
]
),
atol=0,
)
predict_data = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1, 5)),
columns=["A", "B", "C", "F", "E"],
)[:]
def test_predict_probability_errors(self):
np.random.seed(42)
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(2, 5)),
columns=["A", "B", "C", "D", "E"],
)
fit_data = values[:1]
predict_data = values[1:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(
ValueError, self.model_connected.predict_probability, predict_data
)
predict_data = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1, 5)),
columns=["A", "B", "C", "F", "E"],
)[:]
self.assertRaises(
ValueError, self.model_connected.predict_probability, predict_data
)
def test_do(self):
# One confounder var with treatement T and outcome C: S -> T -> C ; S -> C
model = BayesianNetwork([("S", "T"), ("T", "C"), ("S", "C")])
cpd_s = TabularCPD(
variable="S",
variable_card=2,
values=[[0.5], [0.5]],
state_names={"S": ["m", "f"]},
)
cpd_t = TabularCPD(
variable="T",
variable_card=2,
values=[[0.25, 0.75], [0.75, 0.25]],
evidence=["S"],
evidence_card=[2],
state_names={"S": ["m", "f"], "T": [0, 1]},
)
cpd_c = TabularCPD(
variable="C",
variable_card=2,
values=[[0.3, 0.4, 0.7, 0.8], [0.7, 0.6, 0.3, 0.2]],
evidence=["S", "T"],
evidence_card=[2, 2],
state_names={"S": ["m", "f"], "T": [0, 1], "C": [0, 1]},
)
model.add_cpds(cpd_s, cpd_t, cpd_c)
model_do_inplace = model.do(["T"], inplace=True)
model_do_new = model.do(["T"], inplace=False)
for m in [model_do_inplace, model_do_new]:
self.assertEqual(sorted(list(m.edges())), sorted([("S", "C"), ("T", "C")]))
self.assertEqual(len(m.cpds), 3)
np_test.assert_array_equal(
m.get_cpds(node="S").values, np.array([0.5, 0.5])
)
np_test.assert_array_equal(
m.get_cpds(node="T").values, np.array([0.5, 0.5])
)
np_test.assert_array_equal(
m.get_cpds(node="C").values,
np.array([[[0.3, 0.4], [0.7, 0.8]], [[0.7, 0.6], [0.3, 0.2]]]),
)
def test_simulate(self):
asia = get_example_model("asia")
n_samples = int(1e3)
samples = asia.simulate(n_samples=n_samples)
self.assertEqual(samples.shape[0], n_samples)
def tearDown(self):
del self.model_connected
del self.model_disconnected
class TestDAGCPDOperations(unittest.TestCase):
def setUp(self):
self.graph = BayesianNetwork()
def test_add_single_cpd(self):
cpd = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd)
self.assertListEqual(self.graph.get_cpds(), [cpd])
def test_add_multiple_cpds(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])
def test_remove_single_cpd(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1)
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_remove_single_cpd_string(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds("diff")
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds_string(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds("diff", "grade")
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_get_values_for_node(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertEqual(self.graph.get_cpds("diff"), cpd1)
self.assertEqual(self.graph.get_cpds("intel"), cpd2)
self.assertEqual(self.graph.get_cpds("grade"), cpd3)
def test_get_values_raises_error(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertRaises(ValueError, self.graph.get_cpds, "sat")
def tearDown(self):
del self.graph
| mit |
evgchz/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 19 | 22876 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logistic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
"""Test multinomial LR on a binary problem."""
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
"""Test sparsify and densify members."""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
"""Test that the path algorithm is consistent"""
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_loss_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
loss_interp_2, grad_interp_2, hess = \
_logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
"""test for LogisticRegressionCV object"""
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature of all ones, i.e. a column vector of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
"""Test that OvR and multinomial are correct using the iris dataset."""
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. However, it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
"""Test that warnings are raised if model does not converge"""
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
"""Tests for the multinomial option in logistic regression"""
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=50, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
_, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_loss_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
"""Test negative prediction when decision_function values are zero.
Liblinear predicts the positive class when decision_function values
are zero. This is a test to verify that we do not do the same.
See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
"""
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
"""Test LogRegCV with solver='liblinear' works for sparse matrices"""
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
| bsd-3-clause |
kylerbrown/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
shahankhatch/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 93 | 2471 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
hujiajie/pa-chromium | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 9 | 11452 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
def FindChrome(src_dir, options):
if options.browser_path:
return options.browser_path
# List of places that chrome could live.
# In theory we should be more careful about what platform we're actually
# building for.
# As currently constructed, this will also hork people who have debug and
# release builds sitting side by side who build locally.
mode = options.mode
chrome_locations = [
'build/%s/chrome.exe' % mode,
'chrome/%s/chrome.exe' % mode,
# For Linux buildbots. scripts/slave/extract_build.py extracts builds
# to src/sconsbuild/ rather than src/out/.
'sconsbuild/%s/chrome' % mode,
# Windows Chromium ninja builder
'out/%s/chrome.exe' % mode,
'out/%s/chrome' % mode,
# Mac Chromium make builder
'out/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
# Mac release make builder
'out/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
# Mac Chromium xcode builder
'xcodebuild/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
# Mac release xcode builder
'xcodebuild/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
]
# Pick the one with the newest timestamp.
latest_mtime = 0
latest_path = None
for chrome in chrome_locations:
chrome_filename = os.path.join(src_dir, chrome)
if os.path.exists(chrome_filename):
mtime = os.path.getmtime(chrome_filename)
if mtime > latest_mtime:
latest_mtime = mtime
latest_path = chrome_filename
if latest_path is not None:
return latest_path
  raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
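  # A hedged illustration (not in the original script) of what the filter
  # above accepts; the first two sample paths repeat the examples given in
  # the comments above, the third is an assumed non-matching path:
  #   file_name_re.search(r'C:\temp\83C4.tmp')                   # matches
  #   file_name_re.search('/tmp/.org.chromium.Chromium.EQrEzl')  # matches
  #   file_name_re.search('/tmp/unrelated.txt')                  # no match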
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, so look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
chrome_filename = FindChrome(src_dir, options)
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/io/msgpack/test_read_size.py | 22 | 1870 | """Test Unpacker's read_array_header and read_map_header methods"""
from pandas.io.msgpack import packb, Unpacker, OutOfData
UnexpectedTypeException = ValueError
def test_read_array_header():
unpacker = Unpacker()
unpacker.feed(packb(['a', 'b', 'c']))
assert unpacker.read_array_header() == 3
assert unpacker.unpack() == b'a'
assert unpacker.unpack() == b'b'
assert unpacker.unpack() == b'c'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_read_map_header():
unpacker = Unpacker()
unpacker.feed(packb({'a': 'A'}))
assert unpacker.read_map_header() == 1
assert unpacker.unpack() == B'a'
assert unpacker.unpack() == B'A'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_incorrect_type_array():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_map():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_correct_type_nested_array():
unpacker = Unpacker()
unpacker.feed(packb({'a': ['b', 'c', 'd']}))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_nested_map():
unpacker = Unpacker()
unpacker.feed(packb([{'a': 'b'}]))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
| mit |
13anjou/Research-Internship | Analysis/analyseDiff.py | 1 | 1075 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import csv
from pylab import*
import numpy
import math
import matplotlib.pyplot as plt
import numbers
k = 0.2
s = 0.8
import dico as d
def main(k) :
dico = d.main()
plt.figure(figsize=(20,20))
plt.title('Mean distance to the power-law distribution versus stations', fontsize = 40)
plt.legend()
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Resultats\\log_log\\10_{}\\toutes\\ecartPowerLaw_{}.csv'.format(k,k), 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';',quotechar=',', quoting=csv.QUOTE_MINIMAL)
for row in reader :
scatter(enlever(row[0]),enlever(row[1]))
yticks(fontsize=40)
xticks(fontsize=40)
plt.xlabel("station", fontsize=40)
plt.ylabel("mean distance to the power-law distribution", fontsize=40)
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\DistancePowerLaw.png')
clf()
def enlever(s) :
	res = str()
	for l in s :
		if l ==' ' :
			pass
		else :
			res=res + l
	return(res)
if __name__ == "__main__" :
	main(k) | gpl-2.0 |
RTHMaK/RPGOne | doc/examples/edges/plot_active_contours.py | 3 | 3296 | """
====================
Active Contour Model
====================
The active contour model is a method to fit open or closed splines to lines or
edges in an image. It works by minimising an energy that is in part defined by
the image and part by the spline's shape: length and smoothness. The
minimization is done implicitly in the shape energy and explicitly in the
image energy.
In the following two examples the active contour model is used (1) to segment
the face of a person from the rest of an image by fitting a closed curve
to the edges of the face and (2) to find the darkest curve between two fixed
points while obeying smoothness considerations. Typically it is a good idea to
smooth images a bit before analyzing, as done in the following examples.
.. [1] *Snakes: Active contour models*. Kass, M.; Witkin, A.; Terzopoulos, D.
International Journal of Computer Vision 1 (4): 321 (1988).
We initialize a circle around the astronaut's face and use the default boundary
condition ``bc='periodic'`` to fit a closed curve. The default parameters
``w_line=0, w_edge=1`` will make the curve search towards edges, such as the
boundaries of the face.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage import data
from skimage.filters import gaussian
from skimage.segmentation import active_contour
# Test scipy version, since active contour is only possible
# with recent scipy version
import scipy
scipy_version = list(map(int, scipy.__version__.split('.')))
new_scipy = scipy_version[0] > 0 or \
(scipy_version[0] == 0 and scipy_version[1] >= 14)
img = data.astronaut()
img = rgb2gray(img)
s = np.linspace(0, 2*np.pi, 400)
x = 220 + 100*np.cos(s)
y = 100 + 100*np.sin(s)
init = np.array([x, y]).T
if not new_scipy:
print('You are using an old version of scipy. '
'Active contours is implemented for scipy versions '
'0.14.0 and above.')
if new_scipy:
snake = active_contour(gaussian(img, 3),
init, alpha=0.015, beta=10, gamma=0.001)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
plt.gray()
ax.imshow(img)
ax.plot(init[:, 0], init[:, 1], '--r', lw=3)
ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
ax.set_xticks([]), ax.set_yticks([])
ax.axis([0, img.shape[1], img.shape[0], 0])
"""
.. image:: PLOT2RST.current_figure
Here we initialize a straight line between two points, `(5, 136)` and
`(424, 50)`, and require that the spline has its end points there by giving
the boundary condition `bc='fixed'`. We furthermore make the algorithm search
for dark lines by giving a negative `w_line` value.
"""
img = data.text()
x = np.linspace(5, 424, 100)
y = np.linspace(136, 50, 100)
init = np.array([x, y]).T
if new_scipy:
snake = active_contour(gaussian(img, 1), init, bc='fixed',
alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
fig = plt.figure(figsize=(9, 5))
ax = fig.add_subplot(111)
plt.gray()
ax.imshow(img)
ax.plot(init[:, 0], init[:, 1], '--r', lw=3)
ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
ax.set_xticks([]), ax.set_yticks([])
ax.axis([0, img.shape[1], img.shape[0], 0])
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| apache-2.0 |
bsipocz/bokeh | examples/compat/mpl/subplots.py | 13 | 1798 | """
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import show
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3 + 0.5 * x
xfit = np.linspace(np.amin(x), np.amax(x), len(x))
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.ylabel('II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.ylabel('III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('IV', fontsize=20)
# We create the figure in matplotlib and then we "pass it" to Bokeh
show(mpl.to_bokeh(name="subplots"))
| bsd-3-clause |
alexei-matveev/ase-local | ase/gui/graphs.py | 2 | 4772 | #!/usr/bin/env python
from math import sqrt
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack, help
graph_help_text = _("""\
Help for plot ...
Symbols:
<c>e</c>:\t\t\t\ttotal energy
<c>epot</c>:\t\t\tpotential energy
<c>ekin</c>:\t\t\tkinetic energy
<c>fmax</c>:\t\t\tmaximum force
<c>fave</c>:\t\t\taverage force
<c>R[n,0-2]</c>:\t\t\tposition of atom number <c>n</c>
<c>d(n<sub>1</sub>,n<sub>2</sub>)</c>:\t\t\tdistance between two atoms <c>n<sub>1</sub></c> and <c>n<sub>2</sub></c>
<c>i</c>:\t\t\t\tcurrent image number
<c>E[i]</c>:\t\t\t\tenergy of image number <c>i</c>
<c>F[n,0-2]</c>:\t\t\tforce on atom number <c>n</c>
<c>V[n,0-2]</c>:\t\t\tvelocity of atom number <c>n</c>
<c>M[n]</c>:\t\t\tmagnetic moment of atom number <c>n</c>
<c>A[0-2,0-2]</c>:\t\tunit-cell basis vectors
<c>s</c>:\t\t\t\tpath length
<c>a(n1,n2,n3)</c>:\t\tangle between atoms <c>n<sub>1</sub></c>, <c>n<sub>2</sub></c> and <c>n<sub>3</sub></c>, centered on <c>n<sub>2</sub></c>
<c>dih(n1,n2,n3,n4)</c>:\tdihedral angle between <c>n<sub>1</sub></c>, <c>n<sub>2</sub></c>, <c>n<sub>3</sub></c> and <c>n<sub>4</sub></c>
<c>T</c>:\t\t\t\ttemperature (K)\
""")
class Graphs(gtk.Window):
def __init__(self, gui):
gtk.Window.__init__(self)
#self.window.set_position(gtk.WIN_POS_CENTER)
#self.window.connect("destroy", lambda w: gtk.main_quit())
#self.window.connect('delete_event', self.exit)
self.set_title('Graphs')
vbox = gtk.VBox()
self.expr = pack(vbox, [gtk.Entry(64),
help(graph_help_text)])[0]
self.expr.connect('activate', self.plot)
completion = gtk.EntryCompletion()
self.liststore = gtk.ListStore(str)
for s in ['fmax', 's, e-E[0]', 'i, d(0,1)']:
self.liststore.append([s])
completion.set_model(self.liststore)
self.expr.set_completion(completion)
completion.set_text_column(0)
button = pack(vbox, [gtk.Button(_('Plot')),
gtk.Label(' x, y1, y2, ...')])[0]
button.connect('clicked', self.plot, 'xy')
button = pack(vbox, [gtk.Button(_('Plot')),
gtk.Label(' y1, y2, ...')])[0]
button.connect('clicked', self.plot, 'y')
save_button = gtk.Button(stock=gtk.STOCK_SAVE)
save_button.connect('clicked',self.save)
clear_button = gtk.Button(_('clear'))
clear_button.connect('clicked', self.clear)
pack(vbox, [save_button,clear_button])
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
def plot(self, button=None, type=None, expr=None):
if expr is None:
expr = self.expr.get_text()
else:
self.expr.set_text(expr)
if expr not in [row[0] for row in self.liststore]:
self.liststore.append([expr])
data = self.gui.images.graph(expr)
import matplotlib
matplotlib.interactive(True)
matplotlib.use('GTKAgg')
#matplotlib.use('GTK', warn=False)# Not avail. in 0.91 (it is in 0.98)
import pylab
pylab.ion()
x = 2.5
self.gui.graphs.append(pylab.figure(figsize=(x * 2.5**0.5, x)))
i = self.gui.frame
m = len(data)
if type is None:
if m == 1:
type = 'y'
else:
type = 'xy'
if type == 'y':
for j in range(m):
pylab.plot(data[j])
pylab.plot([i], [data[j, i]], 'o')
else:
for j in range(1, m):
pylab.plot(data[0], data[j])
pylab.plot([data[0, i]], [data[j, i]], 'o')
pylab.title(expr)
#pylab.show()
python = plot
def save(self, filename):
chooser = gtk.FileChooserDialog(
_('Save data to file ... '), None, gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
save = chooser.run()
if save == gtk.RESPONSE_OK:
filename = chooser.get_filename()
expr = self.expr.get_text()
data = self.gui.images.graph(expr)
expr = '# '+expr
fd = open(filename,'w')
fd.write("%s \n" % (expr))
for s in range(len(data[0])):
for i in range(len(data)):
val = data[i,s]
fd.write("%12.8e\t" % (val))
fd.write("\n")
fd.close()
chooser.destroy()
def clear(self, button):
import pylab
for graph in self.gui.graphs:
pylab.close(graph)
self.gui.graphs = []
| gpl-2.0 |
DonJayamanne/pythonVSCode | pythonFiles/vscode_datascience_helpers/dataframes/vscodeDataFrameHelpers.py | 1 | 1298 | import pandas as _VSCODE_pd
import builtins as _VSCODE_builtins
# Function that converts the var passed in into a pandas data frame if possible
def _VSCODE_convertToDataFrame(df):
if isinstance(df, list):
df = _VSCODE_pd.DataFrame(df)
elif isinstance(df, _VSCODE_pd.Series):
df = _VSCODE_pd.Series.to_frame(df)
elif isinstance(df, dict):
df = _VSCODE_pd.Series(df)
df = _VSCODE_pd.Series.to_frame(df)
elif hasattr(df, "toPandas"):
df = df.toPandas()
else:
"""Disabling bandit warning for try, except, pass. We want to swallow all exceptions here to not crash on
variable fetching"""
try:
temp = _VSCODE_pd.DataFrame(df)
df = temp
except: # nosec
pass
return df
# Function to compute row count for a value
def _VSCODE_getRowCount(var):
if hasattr(var, "shape"):
try:
# Get a bit more restrictive with exactly what we want to count as a shape, since anything can define it
if isinstance(var.shape, tuple):
return var.shape[0]
except TypeError:
return 0
elif hasattr(var, "__len__"):
try:
return _VSCODE_builtins.len(var)
except TypeError:
return 0
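# Hedged usage sketch (illustrative only, not executed by the extension);
# it restates the conversions handled by the helpers above:
#   _VSCODE_convertToDataFrame([1, 2, 3])            # list -> DataFrame
#   _VSCODE_convertToDataFrame({"a": 1, "b": 2})     # dict -> Series -> DataFrame
#   _VSCODE_getRowCount(_VSCODE_pd.DataFrame({"a": [1, 2, 3]}))  # returns 3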
| mit |
oztalha/TR-2014 | scrapers/scrape-theplazz.py | 2 | 1332 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 08 15:41:01 2015
@author: Talha
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pandas as pd
import time
#initialize variables
df = pd.DataFrame(columns=('title', 'twcount', 'href'))
driver = webdriver.Chrome()
# thePlazz.com Headlines 'http://theplazz.com/category/headlines/'
driver.get('file:///Users/toz/Documents/workspace/TR-2014/data/thePlazz.html')
time.sleep(60) #have to wait till the page is loaded completely
# The commented-out code below is how I retrieved thePlazz.html in the first
# place; I added the two lines above for your convenience but never used them myself
#driver.get('http://theplazz.com/category/headlines/')
#for i in range(600):
# driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# time.sleep(3)
news = driver.find_elements_by_xpath("//div[@class='post-top']")
for i,haber in enumerate(news):
title = haber.find_element_by_tag_name("h3").find_element_by_tag_name("a")
twcount = int(haber.find_element_by_xpath("div[@class='post-meta']").find_element_by_tag_name("a").text)
print i, title.text , twcount, title.get_attribute("href")
df.loc[len(df)+1]=[title.text , twcount, title.get_attribute("href")]
df['twcount']=df['twcount'].astype(int)
df.to_csv("US-news-org.csv",encoding='utf-8',index=False) | mit |
mtp401/airflow | airflow/hooks/base_hook.py | 5 | 2004 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
    Abstract base class for hooks; hooks are meant as an interface to
    interact with external systems. MySqlHook, HiveHook, PigHook return
    objects that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
session.expunge_all()
session.close()
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
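# Hedged sketch (not part of this module) of how a concrete hook is typically
# built on top of BaseHook; `SomeClient` and the connection id used here are
# assumptions for illustration only:
#
#   class MyServiceHook(BaseHook):
#       def __init__(self, my_conn_id='my_service_default'):
#           self.conn = self.get_connection(my_conn_id)
#       def get_conn(self):
#           return SomeClient(host=self.conn.host, port=self.conn.port)
#
# A connection can also be supplied through the environment using the
# CONN_ENV_PREFIX convention, e.g. AIRFLOW_CONN_MY_SERVICE_DEFAULT='<uri>'.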
| apache-2.0 |
chengsoonong/crowdastro | crowdastro/experiment/experiment_yan.py | 1 | 5284 | """Runs the Yan algorithm on a simulated crowd classification task.
Matthew Alger
The Australian National University
2016
"""
import argparse
import collections
import csv
import logging
import matplotlib
import matplotlib.pyplot as plt
import numpy
import sklearn
import sklearn.cluster
import sklearn.cross_validation
from . import runners
from .. import __version__
from ..crowd.util import majority_vote
from ..plot import vertical_scatter_ba
from .results import Results
def main(input_csv_path, results_h5_path, overwrite=False, plot=False,
seed=0, shuffle_seed=0):
numpy.random.seed(seed)
with open(input_csv_path, 'r') as f:
reader = csv.reader(f)
features = []
labels = []
for row in reader:
label = row[-1] == '4'
labels.append(label)
feature = [float(i) if i != '?' else 0 for i in row[1:-1]]
features.append(feature)
features = numpy.array(features)
labels = numpy.array(labels)
n_splits = 3
n_labellers = 5
mask_rate = 0.5 # Lower = less masked.
n_examples, n_params = features.shape
n_params += 1 # Bias term.
n_params += n_labellers * n_params # w.
methods = ['Yan', 'LR', 'LR(Groundtruth)']
model = '{} crowdastro.crowd.yan.YanClassifier,'.format(
__version__) + \
'{} sklearn.linear_model.LogisticRegression'.format(
sklearn.__version__)
results = Results(results_h5_path, methods, n_splits, n_examples,
n_params, model)
# Generate the crowd labels. Cluster the data into T clusters and assign
# each cluster to a labeller. That labeller is 100% accurate in that
# cluster and 25% accurate everywhere else.
km = sklearn.cluster.KMeans(n_clusters=n_labellers)
km.fit(features)
classes = km.predict(features)
crowd_labels = numpy.tile(labels, (n_labellers, 1))
for labeller in range(n_labellers):
for i in range(n_examples):
if classes[i] == labeller:
crowd_labels[labeller, i] = labels[i]
elif numpy.random.random() < 0.75:
crowd_labels[labeller, i] = 1 - labels[i]
else:
crowd_labels[labeller, i] = labels[i]
# Randomly mask a percentage of the elements.
mask = numpy.random.binomial(1, mask_rate, size=crowd_labels.shape)
crowd_labels = numpy.ma.MaskedArray(crowd_labels, mask=mask)
# Compute a majority vote of the crowd labels to use for LR.
mv = majority_vote(crowd_labels)
all_features = {
'Yan': features,
'LR': features,
'LR(Groundtruth)': features,
}
targets = {
'LR': mv,
'Yan': crowd_labels,
'LR(Groundtruth)': labels,
}
ss = sklearn.cross_validation.ShuffleSplit(n_examples, n_iter=n_splits,
test_size=0.25, random_state=shuffle_seed)
for split_id, (train, test) in enumerate(ss):
logging.info('Test {}/{}'.format(split_id + 1, n_splits))
for method_id, method in enumerate(methods):
logging.info('Method {} ({}/{})'.format(method, method_id + 1,
len(methods)))
if method.startswith('LR'):
runners.lr(results, method, split_id, all_features[method],
targets[method], sorted(test),
overwrite=overwrite)
elif method == 'Yan':
runners.yan(results, method, split_id,
all_features[method], targets[method],
sorted(test), overwrite=overwrite,
n_restarts=5)
else:
raise ValueError('Unexpected method: {}'.format(method))
if plot:
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = ['Palatino Linotype']
plt.figure(figsize=(8, 4)) # Make the plot a little shorter.
vertical_scatter_ba(results, labels, violin=False, minorticks=False)
plt.ylim((0, 1))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', default='data/breast_cancer_wisconsin.csv',
help='Input breast cancer data CSV')
parser.add_argument('--results', default='data/results_yan.h5',
help='HDF5 results data file')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing results')
parser.add_argument('--verbose', '-v', action='store_true',
help='Verbose output')
parser.add_argument('--plot', action='store_true', help='Generate a plot')
args = parser.parse_args()
if args.verbose:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
main(args.input, args.results, overwrite=args.overwrite, plot=args.plot)
| mit |
DonBeo/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max] x [y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
python-control/python-control | control/freqplot.py | 1 | 56813 | # freqplot.py - frequency domain plots for control systems
#
# Author: Richard M. Murray
# Date: 24 May 09
#
# This file contains some standard control system plots: Bode plots,
# Nyquist plots and pole-zero diagrams. The code for Nichols charts
# is in nichols.py.
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import warnings
from .ctrlutil import unwrap
from .bdalg import feedback
from .margins import stability_margins
from .exception import ControlMIMONotImplemented
from .statesp import StateSpace
from .xferfcn import TransferFunction
from . import config
__all__ = ['bode_plot', 'nyquist_plot', 'gangof4_plot', 'singular_values_plot',
'bode', 'nyquist', 'gangof4']
# Default values for module parameter variables
_freqplot_defaults = {
'freqplot.feature_periphery_decades': 1,
'freqplot.number_of_samples': 1000,
'freqplot.dB': False, # Plot gain in dB
'freqplot.deg': True, # Plot phase in degrees
'freqplot.Hz': False, # Plot frequency in Hertz
'freqplot.grid': True, # Turn on grid for gain and phase
'freqplot.wrap_phase': False, # Wrap the phase plot at a given value
# deprecations
'deprecated.bode.dB': 'freqplot.dB',
'deprecated.bode.deg': 'freqplot.deg',
'deprecated.bode.Hz': 'freqplot.Hz',
'deprecated.bode.grid': 'freqplot.grid',
'deprecated.bode.wrap_phase': 'freqplot.wrap_phase',
}
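# These module defaults can be overridden at run time through the config
# module; a short sketch (the values shown are arbitrary choices, not
# recommendations):
#
#   import control
#   control.config.defaults['freqplot.Hz'] = True   # label frequency in Hz
#   control.config.defaults['freqplot.dB'] = True   # plot magnitude in dB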
#
# Main plotting functions
#
# This section of the code contains the functions for generating
# frequency domain plots
#
#
# Bode plot
#
def bode_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
margins=None, method='best', *args, **kwargs):
"""Bode plot for a system
Plots a Bode plot for the system over a (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear input/output systems (single system is OK)
omega : array_like
List of frequencies in rad/sec to be used for frequency response
dB : bool
If True, plot result in dB. Default is false.
Hz : bool
If True, plot frequency in Hz (omega must be provided in rad/sec).
Default value (False) set by config.defaults['freqplot.Hz']
deg : bool
If True, plot phase in degrees (else radians). Default value (True)
        set by config.defaults['freqplot.deg']
plot : bool
If True (default), plot magnitude and phase
omega_limits : array_like of two values
        Limits of the frequency vector to generate.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
margins : bool
If True, plot gain and phase margin.
    method : str
        Method to use in computing margins (see :func:`stability_margins`)
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
mag : ndarray (or list of ndarray if len(syslist) > 1))
magnitude
phase : ndarray (or list of ndarray if len(syslist) > 1))
phase in radians
omega : ndarray (or list of ndarray if len(syslist) > 1))
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['freqplot.grid']`.
initial_phase : float
Set the reference phase to use for the lowest frequency. If set, the
initial phase of the Bode plot will be set to the value closest to the
value specified. Units are in either degrees or radians, depending on
the `deg` parameter. Default is -180 if wrap_phase is False, 0 if
wrap_phase is True.
wrap_phase : bool or float
If wrap_phase is `False`, then the phase will be unwrapped so that it
is continuously increasing or decreasing. If wrap_phase is `True` the
phase will be restricted to the range [-180, 180) (or [:math:`-\\pi`,
:math:`\\pi`) radians). If `wrap_phase` is specified as a float, the
phase will be offset by 360 degrees if it falls below the specified
        value. Defaults to `False`, set by config.defaults['freqplot.wrap_phase'].
The default values for Bode plot configuration parameters can be reset
using the `config.defaults` dictionary, with module name 'bode'.
Notes
-----
1. Alternatively, you may use the lower-level methods
       :meth:`LTI.frequency_response` or ``sys(s)`` or ``sys(z)`` to
generate the frequency response for a single system.
2. If a discrete time model is given, the frequency response is plotted
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
is the discrete timebase. If timebase not specified (``dt=True``),
`dt` is set to 1.
Examples
--------
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> mag, phase, omega = bode(sys)
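    A discrete-time sketch (the 0.1 second sampling time below is an assumed
    value, not part of the original example):
    >>> sysd = sys.sample(0.1)
    >>> mag, phase, omega = bode(sysd)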
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'",
FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param(
'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
deg = config._get_param(
'freqplot', 'deg', kwargs, _freqplot_defaults, pop=True)
Hz = config._get_param(
'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
grid = config._get_param(
'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
plot = config._get_param('freqplot', 'plot', plot, True)
margins = config._get_param(
'freqplot', 'margins', margins, False)
wrap_phase = config._get_param(
'freqplot', 'wrap_phase', kwargs, _freqplot_defaults, pop=True)
initial_phase = config._get_param(
'freqplot', 'initial_phase', kwargs, None, pop=True)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num)
if plot:
# Set up the axes with labels so that multiple calls to
# bode_plot will superimpose the data. This was implicit
# before matplotlib 2.1, but changed after that (See
# https://github.com/matplotlib/matplotlib/issues/9024).
# The code below should work on all cases.
# Get the current figure
if 'sisotool' in kwargs:
fig = kwargs['fig']
ax_mag = fig.axes[0]
ax_phase = fig.axes[2]
sisotool = kwargs['sisotool']
del kwargs['fig']
del kwargs['sisotool']
else:
fig = plt.gcf()
ax_mag = None
ax_phase = None
sisotool = False
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-bode-magnitude':
ax_mag = ax
elif ax.get_label() == 'control-bode-phase':
ax_phase = ax
# If no axes present, create them from scratch
if ax_mag is None or ax_phase is None:
plt.clf()
ax_mag = plt.subplot(211, label='control-bode-magnitude')
ax_phase = plt.subplot(
212, label='control-bode-phase', sharex=ax_mag)
mags, phases, omegas, nyquistfrqs = [], [], [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO bode plots.
raise ControlMIMONotImplemented(
"Bode is currently only implemented for SISO systems.")
else:
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
else:
nyquistfrq = None
mag, phase, omega_sys = sys.frequency_response(omega_sys)
mag = np.atleast_1d(mag)
phase = np.atleast_1d(phase)
#
# Post-process the phase to handle initial value and wrapping
#
if initial_phase is None:
# Start phase in the range 0 to -360 w/ initial phase = -180
# If wrap_phase is true, use 0 instead (phase \in (-pi, pi])
initial_phase = -math.pi if wrap_phase is not True else 0
elif isinstance(initial_phase, (int, float)):
# Allow the user to override the default calculation
if deg:
initial_phase = initial_phase/180. * math.pi
else:
raise ValueError("initial_phase must be a number.")
# Shift the phase if needed
if abs(phase[0] - initial_phase) > math.pi:
phase -= 2*math.pi * \
round((phase[0] - initial_phase) / (2*math.pi))
# Phase wrapping
if wrap_phase is False:
phase = unwrap(phase) # unwrap the phase
elif wrap_phase is True:
pass # default calculation OK
elif isinstance(wrap_phase, (int, float)):
phase = unwrap(phase) # unwrap the phase first
if deg:
wrap_phase *= math.pi/180.
# Shift the phase if it is below the wrap_phase
phase += 2*math.pi * np.maximum(
0, np.ceil((wrap_phase - phase)/(2*math.pi)))
else:
raise ValueError("wrap_phase must be bool or float.")
mags.append(mag)
phases.append(phase)
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
# Get the dimensions of the current axis, which we will divide up
# TODO: Not current implemented; just use subplot for now
if plot:
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
phase_plot = phase * 180. / math.pi if deg else phase
mag_plot = mag
if nyquistfrq_plot:
# append data for vertical nyquist freq indicator line.
                    # if this extra nyquist line is plotted in a single plot
# command then line order is preserved when
# creating a legend eg. legend(('sys1', 'sys2'))
omega_nyq_line = np.array((np.nan, nyquistfrq, nyquistfrq))
omega_plot = np.hstack((omega_plot, omega_nyq_line))
mag_nyq_line = np.array((
np.nan, 0.7*min(mag_plot), 1.3*max(mag_plot)))
mag_plot = np.hstack((mag_plot, mag_nyq_line))
phase_range = max(phase_plot) - min(phase_plot)
phase_nyq_line = np.array(
(np.nan,
min(phase_plot) - 0.2 * phase_range,
max(phase_plot) + 0.2 * phase_range))
phase_plot = np.hstack((phase_plot, phase_nyq_line))
#
# Magnitude plot
#
if dB:
ax_mag.semilogx(omega_plot, 20 * np.log10(mag_plot),
*args, **kwargs)
else:
ax_mag.loglog(omega_plot, mag_plot, *args, **kwargs)
# Add a grid to the plot + labeling
ax_mag.grid(grid and not margins, which='both')
ax_mag.set_ylabel("Magnitude (dB)" if dB else "Magnitude")
#
# Phase plot
#
# Plot the data
ax_phase.semilogx(omega_plot, phase_plot, *args, **kwargs)
# Show the phase and gain margins in the plot
if margins:
# Compute stability margins for the system
margin = stability_margins(sys, method=method)
gm, pm, Wcg, Wcp = (margin[i] for i in (0, 1, 3, 4))
# Figure out sign of the phase at the first gain crossing
# (needed if phase_wrap is True)
phase_at_cp = phases[0][(np.abs(omegas[0] - Wcp)).argmin()]
if phase_at_cp >= 0.:
phase_limit = 180.
else:
phase_limit = -180.
if Hz:
Wcg, Wcp = Wcg/(2*math.pi), Wcp/(2*math.pi)
# Draw lines at gain and phase limits
ax_mag.axhline(y=0 if dB else 1, color='k', linestyle=':',
zorder=-20)
ax_phase.axhline(y=phase_limit if deg else
math.radians(phase_limit),
color='k', linestyle=':', zorder=-20)
mag_ylim = ax_mag.get_ylim()
phase_ylim = ax_phase.get_ylim()
# Annotate the phase margin (if it exists)
                    if pm != float('inf') and not math.isnan(Wcp):
if dB:
ax_mag.semilogx(
[Wcp, Wcp], [0., -1e5],
color='k', linestyle=':', zorder=-20)
else:
ax_mag.loglog(
[Wcp, Wcp], [1., 1e-8],
color='k', linestyle=':', zorder=-20)
if deg:
ax_phase.semilogx(
[Wcp, Wcp], [1e5, phase_limit + pm],
color='k', linestyle=':', zorder=-20)
ax_phase.semilogx(
[Wcp, Wcp], [phase_limit + pm, phase_limit],
color='k', zorder=-20)
else:
ax_phase.semilogx(
[Wcp, Wcp], [1e5, math.radians(phase_limit) +
math.radians(pm)],
color='k', linestyle=':', zorder=-20)
ax_phase.semilogx(
[Wcp, Wcp], [math.radians(phase_limit) +
math.radians(pm),
math.radians(phase_limit)],
color='k', zorder=-20)
# Annotate the gain margin (if it exists)
                    if gm != float('inf') and not math.isnan(Wcg):
if dB:
ax_mag.semilogx(
[Wcg, Wcg], [-20.*np.log10(gm), -1e5],
color='k', linestyle=':', zorder=-20)
ax_mag.semilogx(
[Wcg, Wcg], [0, -20*np.log10(gm)],
color='k', zorder=-20)
else:
ax_mag.loglog(
[Wcg, Wcg], [1./gm, 1e-8], color='k',
linestyle=':', zorder=-20)
ax_mag.loglog(
[Wcg, Wcg], [1., 1./gm], color='k', zorder=-20)
if deg:
ax_phase.semilogx(
[Wcg, Wcg], [0, phase_limit],
color='k', linestyle=':', zorder=-20)
else:
ax_phase.semilogx(
[Wcg, Wcg], [0, math.radians(phase_limit)],
color='k', linestyle=':', zorder=-20)
ax_mag.set_ylim(mag_ylim)
ax_phase.set_ylim(phase_ylim)
if sisotool:
ax_mag.text(
0.04, 0.06,
'G.M.: %.2f %s\nFreq: %.2f %s' %
(20*np.log10(gm) if dB else gm,
'dB ' if dB else '',
Wcg, 'Hz' if Hz else 'rad/s'),
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_mag.transAxes,
fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
ax_phase.text(
0.04, 0.06,
'P.M.: %.2f %s\nFreq: %.2f %s' %
(pm if deg else math.radians(pm),
'deg' if deg else 'rad',
Wcp, 'Hz' if Hz else 'rad/s'),
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_phase.transAxes,
fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
else:
plt.suptitle(
"Gm = %.2f %s(at %.2f %s), "
"Pm = %.2f %s (at %.2f %s)" %
(20*np.log10(gm) if dB else gm,
'dB ' if dB else '',
Wcg, 'Hz' if Hz else 'rad/s',
pm if deg else math.radians(pm),
'deg' if deg else 'rad',
Wcp, 'Hz' if Hz else 'rad/s'))
# Add a grid to the plot + labeling
ax_phase.set_ylabel("Phase (deg)" if deg else "Phase (rad)")
def gen_zero_centered_series(val_min, val_max, period):
v1 = np.ceil(val_min / period - 0.2)
v2 = np.floor(val_max / period + 0.2)
return np.arange(v1, v2 + 1) * period
if deg:
ylim = ax_phase.get_ylim()
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], 45.))
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], 15.), minor=True)
else:
ylim = ax_phase.get_ylim()
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], math.pi / 4.))
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], math.pi / 12.), minor=True)
ax_phase.grid(grid and not margins, which='both')
# ax_mag.grid(which='minor', alpha=0.3)
# ax_mag.grid(which='major', alpha=0.9)
# ax_phase.grid(which='minor', alpha=0.3)
# ax_phase.grid(which='major', alpha=0.9)
# Label the frequency axis
ax_phase.set_xlabel("Frequency (Hz)" if Hz
else "Frequency (rad/sec)")
if len(syslist) == 1:
return mags[0], phases[0], omegas[0]
else:
return mags, phases, omegas
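# Illustrative usage sketch (the system below is an assumed example, not taken
# from this module):
#
#     sys = TransferFunction([1], [1, 2, 1])
#     mag, phase, omega = bode_plot(sys, dB=True, margins=True)
#
# With plot=False the same call skips all drawing and simply returns the
# magnitude, phase and frequency arrays.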
#
# Nyquist plot
#
# Default values for module parameter variables
_nyquist_defaults = {
'nyquist.mirror_style': '--',
'nyquist.arrows': 2,
'nyquist.arrow_size': 8,
'nyquist.indent_radius': 1e-1,
'nyquist.indent_direction': 'right',
}
def nyquist_plot(syslist, omega=None, plot=True, omega_limits=None,
omega_num=None, label_freq=0, color=None,
return_contour=False, warn_nyquist=True, *args, **kwargs):
"""Nyquist plot for a system
    Plots a Nyquist plot for the system over an (optional) frequency range.
    The curve is computed by evaluating the Nyquist segment along the positive
imaginary axis, with a mirror image generated to reflect the negative
imaginary axis. Poles on or near the imaginary axis are avoided using a
small indentation. The portion of the Nyquist contour at infinity is not
explicitly computed (since it maps to a constant value for any system with
a proper transfer function).
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK). Nyquist
curves for each system are plotted on the same graph.
plot : boolean
        If True, plot the Nyquist curve (default True).
omega : array_like
Set of frequencies to be evaluated, in rad/sec.
omega_limits : array_like of two values
Limits to the range of frequencies. Ignored if omega is provided, and
auto-generated if omitted.
omega_num : int
Number of frequency samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
color : string
Used to specify the color of the line and arrowhead.
mirror_style : string or False
Linestyle for mirror image of the Nyquist curve. If `False` then
omit completely. Default linestyle ('--') is determined by
config.defaults['nyquist.mirror_style'].
return_contour : bool
If 'True', return the contour used to evaluate the Nyquist plot.
label_freq : int
Label every nth frequency on the plot. If not specified, no labels
are generated.
arrows : int or 1D/2D array of floats
Specify the number of arrows to plot on the Nyquist curve. If an
        integer is passed, that number of equally spaced arrows will be
plotted on each of the primary segment and the mirror image. If a 1D
array is passed, it should consist of a sorted list of floats between
0 and 1, indicating the location along the curve to plot an arrow. If
a 2D array is passed, the first row will be used to specify arrow
locations for the primary curve and the second row will be used for
the mirror image.
arrow_size : float
Arrowhead width and length (in display coordinates). Default value is
8 and can be set using config.defaults['nyquist.arrow_size'].
arrow_style : matplotlib.patches.ArrowStyle
Define style used for Nyquist curve arrows (overrides `arrow_size`).
indent_radius : float
Amount to indent the Nyquist contour around poles that are at or near
the imaginary axis.
indent_direction : str
For poles on the imaginary axis, set the direction of indentation to
be 'right' (default), 'left', or 'none'.
warn_nyquist : bool, optional
If set to 'False', turn off warnings about frequencies above Nyquist.
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
count : int (or list of int if len(syslist) > 1)
Number of encirclements of the point -1 by the Nyquist curve. If
multiple systems are given, an array of counts is returned.
    contour : ndarray (or list of ndarray if len(syslist) > 1), optional
The contour used to create the primary Nyquist curve segment. To
obtain the Nyquist curve values, evaluate system(s) along contour.
Notes
-----
1. If a discrete time model is given, the frequency response is computed
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
       is the discrete timebase. If the timebase is not specified (``dt=True``),
`dt` is set to 1.
2. If a continuous-time system contains poles on or near the imaginary
axis, a small indentation will be used to avoid the pole. The radius
of the indentation is given by `indent_radius` and it is taken to the
right of stable poles and the left of unstable poles. If a pole is
exactly on the imaginary axis, the `indent_direction` parameter can be
used to set the direction of indentation. Setting `indent_direction`
to `none` will turn off indentation. If `return_contour` is True, the
exact contour used for evaluation is returned.
Examples
--------
>>> sys = ss([[1, -2], [3, -4]], [[5], [7]], [[6, 8]], [[9]])
>>> count = nyquist_plot(sys)
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
warnings.warn("'Plot' keyword is deprecated in nyquist_plot; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'labelFreq' keyword was used
if 'labelFreq' in kwargs:
warnings.warn("'labelFreq' keyword is deprecated in nyquist_plot; "
"use 'label_freq'", FutureWarning)
# Map 'labelFreq' keyword to 'label_freq' keyword
label_freq = kwargs.pop('labelFreq')
# Check to see if legacy 'arrow_width' or 'arrow_length' were used
if 'arrow_width' in kwargs or 'arrow_length' in kwargs:
warnings.warn(
"'arrow_width' and 'arrow_length' keywords are deprecated in "
"nyquist_plot; use `arrow_size` instead", FutureWarning)
kwargs['arrow_size'] = \
(kwargs.get('arrow_width', 0) + kwargs.get('arrow_length', 0)) / 2
kwargs.pop('arrow_width', False)
kwargs.pop('arrow_length', False)
# Get values for params (and pop from list to allow keyword use in plot)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
mirror_style = config._get_param(
'nyquist', 'mirror_style', kwargs, _nyquist_defaults, pop=True)
arrows = config._get_param(
'nyquist', 'arrows', kwargs, _nyquist_defaults, pop=True)
arrow_size = config._get_param(
'nyquist', 'arrow_size', kwargs, _nyquist_defaults, pop=True)
arrow_style = config._get_param('nyquist', 'arrow_style', kwargs, None)
indent_radius = config._get_param(
'nyquist', 'indent_radius', kwargs, _nyquist_defaults, pop=True)
indent_direction = config._get_param(
'nyquist', 'indent_direction', kwargs, _nyquist_defaults, pop=True)
# If argument was a singleton, turn it into a list
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num)
if not omega_range_given:
# Start contour at zero frequency
omega[0] = 0.
# Go through each system and keep track of the results
counts, contours = [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO nyquist plots.
raise ControlMIMONotImplemented(
"Nyquist plot currently only supports SISO systems.")
# Figure out the frequency range
omega_sys = np.asarray(omega)
# Determine the contour used to evaluate the Nyquist curve
if sys.isdtime(strict=True):
# Transform frequencies in for discrete-time systems
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
# Issue a warning if we are sampling above Nyquist
if np.any(omega_sys * sys.dt > np.pi) and warn_nyquist:
warnings.warn("evaluation above Nyquist frequency")
            # Evaluate along the upper half of the unit circle,
            # z = exp(1j * omega * dt), for the discrete-time contour
            contour = np.exp(1j * omega_sys * sys.dt)
else:
contour = 1j * omega_sys
# Bend the contour around any poles on/near the imaginary axis
if isinstance(sys, (StateSpace, TransferFunction)) \
and sys.isctime() and indent_direction != 'none':
poles = sys.pole()
if contour[1].imag > indent_radius \
and 0. in poles and not omega_range_given:
# add some points for quarter circle around poles at origin
contour = np.concatenate(
(1j * np.linspace(0., indent_radius, 50),
contour[1:]))
for i, s in enumerate(contour):
# Find the nearest pole
p = poles[(np.abs(poles - s)).argmin()]
# See if we need to indent around it
if abs(s - p) < indent_radius:
if p.real < 0 or \
(p.real == 0 and indent_direction == 'right'):
# Indent to the right
contour[i] += \
np.sqrt(indent_radius ** 2 - (s-p).imag ** 2)
elif p.real > 0 or \
(p.real == 0 and indent_direction == 'left'):
# Indent to the left
contour[i] -= \
np.sqrt(indent_radius ** 2 - (s-p).imag ** 2)
else:
                        raise ValueError("unknown value for indent_direction")
# TODO: add code to indent around discrete poles on unit circle
# Compute the primary curve
resp = sys(contour)
# Compute CW encirclements of -1 by integrating the (unwrapped) angle
phase = -unwrap(np.angle(resp + 1))
count = int(np.round(np.sum(np.diff(phase)) / np.pi, 0))
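        # Only the upper-half contour is evaluated here; by conjugate symmetry
        # the mirror image contributes an equal phase change, which is why the
        # accumulated phase is divided by pi rather than 2*pi to obtain the
        # winding number about -1.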
counts.append(count)
contours.append(contour)
if plot:
# Parse the arrows keyword
if isinstance(arrows, int):
N = arrows
# Space arrows out, starting midway along each "region"
arrow_pos = np.linspace(0.5/N, 1 + 0.5/N, N, endpoint=False)
elif isinstance(arrows, (list, np.ndarray)):
arrow_pos = np.sort(np.atleast_1d(arrows))
elif not arrows:
arrow_pos = []
else:
raise ValueError("unknown or unsupported arrow location")
# Set the arrow style
if arrow_style is None:
arrow_style = mpl.patches.ArrowStyle(
'simple', head_width=arrow_size, head_length=arrow_size)
# Save the components of the response
x, y = resp.real, resp.imag
# Plot the primary curve
p = plt.plot(x, y, '-', color=color, *args, **kwargs)
c = p[0].get_color()
ax = plt.gca()
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=1)
# Plot the mirror image
if mirror_style is not False:
p = plt.plot(x, -y, mirror_style, color=c, *args, **kwargs)
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=-1)
# Mark the -1 point
plt.plot([-1], [0], 'r+')
# Label the frequencies of the points
if label_freq:
ind = slice(None, None, label_freq)
for xpt, ypt, omegapt in zip(x[ind], y[ind], omega_sys[ind]):
# Convert to Hz
f = omegapt / (2 * np.pi)
# Factor out multiples of 1000 and limit the
# result to the range [-8, 8].
pow1000 = max(min(get_pow1000(f), 8), -8)
# Get the SI prefix.
prefix = gen_prefix(pow1000)
# Apply the text. (Use a space before the text to
# prevent overlap with the data.)
#
# np.round() is used because 0.99... appears
# instead of 1.0, and this would otherwise be
# truncated to 0.
plt.text(xpt, ypt, ' ' +
str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +
prefix + 'Hz')
if plot:
ax = plt.gca()
ax.set_xlabel("Real axis")
ax.set_ylabel("Imaginary axis")
ax.grid(color="lightgray")
# "Squeeze" the results
if len(syslist) == 1:
counts, contours = counts[0], contours[0]
# Return counts and (optionally) the contour we used
return (counts, contours) if return_contour else counts
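# Illustrative usage sketch (the system below is an assumed example):
#
#     sys = TransferFunction([1], [1, 2, 1])
#     count = nyquist_plot(sys)            # encirclements of the -1 point
#     count, contour = nyquist_plot(sys, return_contour=True, plot=False)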
# Internal function to add arrows to a curve
def _add_arrows_to_line2D(
axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8],
arrowstyle='-|>', arrowsize=1, dir=1, transform=None):
"""
Add arrows to a matplotlib.lines.Line2D at selected locations.
Parameters:
-----------
axes: Axes object as returned by axes command (or gca)
line: Line2D object as returned by plot command
arrow_locs: list of locations where to insert arrows, % of total length
arrowstyle: style of the arrow
arrowsize: size of the arrow
transform: a matplotlib transform instance, default to data coordinates
Returns:
--------
arrows: list of arrows
Based on https://stackoverflow.com/questions/26911898/
"""
if not isinstance(line, mpl.lines.Line2D):
raise ValueError("expected a matplotlib.lines.Line2D object")
x, y = line.get_xdata(), line.get_ydata()
arrow_kw = {
"arrowstyle": arrowstyle,
}
color = line.get_color()
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
raise NotImplementedError("multicolor lines not supported")
else:
arrow_kw['color'] = color
linewidth = line.get_linewidth()
if isinstance(linewidth, np.ndarray):
raise NotImplementedError("multiwidth lines not supported")
else:
arrow_kw['linewidth'] = linewidth
if transform is None:
transform = axes.transData
# Compute the arc length along the curve
s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
arrows = []
for loc in arrow_locs:
n = np.searchsorted(s, s[-1] * loc)
# Figure out what direction to paint the arrow
if dir == 1:
arrow_tail = (x[n], y[n])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
elif dir == -1:
# Orient the arrow in the other direction on the segment
arrow_tail = (x[n + 1], y[n + 1])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
else:
raise ValueError("unknown value for keyword 'dir'")
p = mpl.patches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform, lw=0,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
return arrows
#
# Gang of Four plot
#
# TODO: think about how (and whether) to handle lists of systems
def gangof4_plot(P, C, omega=None, **kwargs):
"""Plot the "Gang of 4" transfer functions for a system
Generates a 2x2 plot showing the "Gang of 4" sensitivity functions
[T, PS; CS, S]
Parameters
----------
P, C : LTI
Linear input/output systems (process and control)
omega : array
Range of frequencies (list or bounds) in rad/sec
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
None
"""
if not P.issiso() or not C.issiso():
# TODO: Add MIMO go4 plots.
raise ControlMIMONotImplemented(
"Gang of four is currently only implemented for SISO systems.")
# Get the default parameter values
dB = config._get_param(
'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
Hz = config._get_param(
'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
grid = config._get_param(
'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
    # Compute the sensitivity functions
L = P * C
S = feedback(1, L)
T = L * S
# Select a default range if none is provided
# TODO: This needs to be made more intelligent
if omega is None:
omega = _default_frequency_range((P, C, S))
# Set up the axes with labels so that multiple calls to
# gangof4_plot will superimpose the data. See details in bode_plot.
plot_axes = {'t': None, 's': None, 'ps': None, 'cs': None}
for ax in plt.gcf().axes:
label = ax.get_label()
if label.startswith('control-gangof4-'):
key = label[len('control-gangof4-'):]
if key not in plot_axes:
raise RuntimeError(
"unknown gangof4 axis type '{}'".format(label))
plot_axes[key] = ax
# if any of the axes are missing, start from scratch
if any((ax is None for ax in plot_axes.values())):
plt.clf()
plot_axes = {'s': plt.subplot(221, label='control-gangof4-s'),
'ps': plt.subplot(222, label='control-gangof4-ps'),
'cs': plt.subplot(223, label='control-gangof4-cs'),
't': plt.subplot(224, label='control-gangof4-t')}
#
# Plot the four sensitivity functions
#
omega_plot = omega / (2. * math.pi) if Hz else omega
# TODO: Need to add in the mag = 1 lines
mag_tmp, phase_tmp, omega = S.frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['s'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['s'].loglog(omega_plot, mag, **kwargs)
plot_axes['s'].set_ylabel("$|S|$" + " (dB)" if dB else "")
plot_axes['s'].tick_params(labelbottom=False)
plot_axes['s'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = (P * S).frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['ps'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['ps'].loglog(omega_plot, mag, **kwargs)
plot_axes['ps'].tick_params(labelbottom=False)
plot_axes['ps'].set_ylabel("$|PS|$" + " (dB)" if dB else "")
plot_axes['ps'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = (C * S).frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['cs'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['cs'].loglog(omega_plot, mag, **kwargs)
plot_axes['cs'].set_xlabel(
"Frequency (Hz)" if Hz else "Frequency (rad/sec)")
plot_axes['cs'].set_ylabel("$|CS|$" + " (dB)" if dB else "")
plot_axes['cs'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = T.frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['t'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['t'].loglog(omega_plot, mag, **kwargs)
plot_axes['t'].set_xlabel(
"Frequency (Hz)" if Hz else "Frequency (rad/sec)")
plot_axes['t'].set_ylabel("$|T|$" + " (dB)" if dB else "")
plot_axes['t'].grid(grid, which='both')
plt.tight_layout()
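# Illustrative usage sketch (the process and controller below are assumed
# examples):
#
#     P = TransferFunction([1], [1, 1])     # first-order process
#     C = TransferFunction([10], [1, 0])    # integral controller
#     gangof4_plot(P, C, dB=True)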
#
# Singular values plot
#
def singular_values_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
*args, **kwargs):
"""Singular value plot for a system
    Plots a singular value plot for the system over an (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear systems (single system is OK).
omega : array_like
List of frequencies in rad/sec to be used for frequency response.
plot : bool
If True (default), generate the singular values plot.
omega_limits : array_like of two values
Limits of the frequency vector to generate.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot.
Default value (1000) set by config.defaults['freqplot.number_of_samples'].
dB : bool
If True, plot result in dB.
Default value (False) set by config.defaults['freqplot.dB'].
Hz : bool
If True, plot frequency in Hz (omega must be provided in rad/sec).
Default value (False) set by config.defaults['freqplot.Hz']
Returns
-------
    sigma : ndarray (or list of ndarray if len(syslist) > 1)
singular values
    omega : ndarray (or list of ndarray if len(syslist) > 1)
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['freqplot.grid']`.
Examples
--------
>>> import numpy as np
>>> den = [75, 1]
>>> sys = TransferFunction([[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]])
>>> omega = np.logspace(-4, 1, 1000)
>>> sigma, omega = singular_values_plot(sys, plot=True)
>>> singular_values_plot(sys, 0.0, plot=False)
(array([[197.20868123],
[ 1.39141948]]), array([0.]))
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param(
'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
Hz = config._get_param(
'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
grid = config._get_param(
'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
plot = config._get_param(
'freqplot', 'plot', plot, True)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num)
omega = np.atleast_1d(omega)
if plot:
fig = plt.gcf()
ax_sigma = None
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-sigma':
ax_sigma = ax
# If no axes present, create them from scratch
if ax_sigma is None:
plt.clf()
ax_sigma = plt.subplot(111, label='control-sigma')
# color cycle handled manually as all singular values
# of the same systems are expected to be of the same color
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
color_offset = 0
if len(ax_sigma.lines) > 0:
last_color = ax_sigma.lines[-1].get_color()
if last_color in color_cycle:
color_offset = color_cycle.index(last_color) + 1
sigmas, omegas, nyquistfrqs = [], [], []
for idx_sys, sys in enumerate(syslist):
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
omega_complex = np.exp(1j * omega_sys * sys.dt)
else:
nyquistfrq = None
omega_complex = 1j*omega_sys
fresp = sys(omega_complex, squeeze=False)
fresp = fresp.transpose((2, 0, 1))
sigma = np.linalg.svd(fresp, compute_uv=False)
sigmas.append(sigma.transpose()) # return shape is "channel first"
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
if plot:
color = color_cycle[(idx_sys + color_offset) % len(color_cycle)]
color = kwargs.pop('color', color)
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
sigma_plot = sigma
if dB:
ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot),
color=color, *args, **kwargs)
else:
ax_sigma.loglog(omega_plot, sigma_plot,
color=color, *args, **kwargs)
if nyquistfrq_plot is not None:
ax_sigma.axvline(x=nyquistfrq_plot, color=color)
# Add a grid to the plot + labeling
if plot:
ax_sigma.grid(grid, which='both')
ax_sigma.set_ylabel("Singular Values (dB)" if dB else "Singular Values")
ax_sigma.set_xlabel("Frequency (Hz)" if Hz else "Frequency (rad/sec)")
if len(syslist) == 1:
return sigmas[0], omegas[0]
else:
return sigmas, omegas
#
# Utility functions
#
# This section of the code contains some utility functions for
# generating frequency domain plots
#
# Determine the frequency range to be used
def _determine_omega_vector(syslist, omega_in, omega_limits, omega_num):
"""Determine the frequency range for a frequency-domain plot
according to a standard logic.
If omega_in and omega_limits are both None, then omega_out is computed
on omega_num points according to a default logic defined by
_default_frequency_range and tailored for the list of systems syslist, and
omega_range_given is set to False.
If omega_in is None but omega_limits is an array-like of 2 elements, then
omega_out is computed with the function np.logspace on omega_num points
within the interval [min, max] = [omega_limits[0], omega_limits[1]], and
omega_range_given is set to True.
If omega_in is not None, then omega_out is set to omega_in,
and omega_range_given is set to True
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
omega_in : 1D array_like or None
Frequency range specified by the user
omega_limits : 1D array_like or None
Frequency limits specified by the user
omega_num : int
Number of points to be used for the frequency
range (if the frequency range is not user-specified)
Returns
-------
omega_out : 1D array
Frequency range to be used
omega_range_given : bool
True if the frequency range was specified by the user, either through
omega_in or through omega_limits. False if both omega_in
and omega_limits are None.
"""
omega_range_given = True
if omega_in is None:
if omega_limits is None:
omega_range_given = False
# Select a default range if none is provided
omega_out = _default_frequency_range(syslist,
number_of_samples=omega_num)
else:
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
omega_out = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]),
num=omega_num, endpoint=True)
else:
omega_out = np.copy(omega_in)
return omega_out, omega_range_given
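# Behaviour sketch (assuming a generic SISO system `sys` and a user-supplied
# frequency vector `w`):
#
#     _determine_omega_vector([sys], None, None, 1000)        # default range, False
#     _determine_omega_vector([sys], None, [0.1, 100], 1000)  # logspace(-1, 2, 1000), True
#     _determine_omega_vector([sys], w, None, 1000)           # copy of w, True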
# Compute reasonable defaults for axes
def _default_frequency_range(syslist, Hz=None, number_of_samples=None,
feature_periphery_decades=None):
"""Compute a default frequency range for frequency domain plots.
This code looks at the poles and zeros of all of the systems that
we are plotting and sets the frequency range to be one decade above
and below the min and max feature frequencies, rounded to the nearest
integer. If no features are found, it returns logspace(-1, 1)
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
Hz : bool
        If True, the frequency limits (first and last values) are set to full
        decades in Hz so the plot fits a logarithmic scale in Hz; otherwise
        they are set in rad/s. Omega is always returned in rad/sec.
number_of_samples : int, optional
Number of samples to generate. The default value is read from
        ``config.defaults['freqplot.number_of_samples']``. If None, then the
default from `numpy.logspace` is used.
feature_periphery_decades : float, optional
Defines how many decades shall be included in the frequency range on
both sides of features (poles, zeros). The default value is read from
``config.defaults['freqplot.feature_periphery_decades']``.
Returns
-------
omega : array
Range of frequencies in rad/sec
Examples
--------
>>> from matlab import ss
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> omega = _default_frequency_range(sys)
"""
# Set default values for options
number_of_samples = config._get_param(
'freqplot', 'number_of_samples', number_of_samples)
feature_periphery_decades = config._get_param(
'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)
# Find the list of all poles and zeros in the systems
features = np.array(())
freq_interesting = []
# detect if single sys passed by checking if it is sequence-like
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
for sys in syslist:
try:
# Add new features to the list
if sys.isctime():
features_ = np.concatenate((np.abs(sys.pole()),
np.abs(sys.zero())))
# Get rid of poles and zeros at the origin
toreplace = features_ == 0.0
if np.any(toreplace):
features_ = features_[~toreplace]
elif sys.isdtime(strict=True):
fn = math.pi * 1. / sys.dt
# TODO: What distance to the Nyquist frequency is appropriate?
freq_interesting.append(fn * 0.9)
features_ = np.concatenate((sys.pole(),
sys.zero()))
# Get rid of poles and zeros on the real axis (imag==0)
# * origin and real < 0
                # * at 1.: would result in omega=0. (logarithmic plot!)
toreplace = (features_.imag == 0.0) & (
(features_.real <= 0.) |
(np.abs(features_.real - 1.0) < 1.e-10))
if np.any(toreplace):
features_ = features_[~toreplace]
# TODO: improve
features_ = np.abs(np.log(features_) / (1.j * sys.dt))
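                # The np.log(z) / (1j * dt) mapping above converts each
                # discrete-time pole/zero z back to an equivalent continuous
                # frequency magnitude (z = exp(s * dt)), so the decade logic
                # below is shared between continuous and discrete systems.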
else:
# TODO
raise NotImplementedError(
"type of system in not implemented now")
features = np.concatenate((features, features_))
except NotImplementedError:
pass
# Make sure there is at least one point in the range
if features.shape[0] == 0:
features = np.array([1.])
if Hz:
features /= 2. * math.pi
features = np.log10(features)
lsp_min = np.floor(np.min(features) - feature_periphery_decades)
lsp_max = np.ceil(np.max(features) + feature_periphery_decades)
lsp_min += np.log10(2. * math.pi)
lsp_max += np.log10(2. * math.pi)
else:
features = np.log10(features)
lsp_min = np.floor(np.min(features) - feature_periphery_decades)
lsp_max = np.ceil(np.max(features) + feature_periphery_decades)
if freq_interesting:
lsp_min = min(lsp_min, np.log10(min(freq_interesting)))
lsp_max = max(lsp_max, np.log10(max(freq_interesting)))
# TODO: Add a check in discrete case to make sure we don't get aliasing
# (Attention: there is a list of system but only one omega vector)
# Set the range to be an order of magnitude beyond any features
if number_of_samples:
omega = np.logspace(
lsp_min, lsp_max, num=number_of_samples, endpoint=True)
else:
omega = np.logspace(lsp_min, lsp_max, endpoint=True)
return omega
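# Worked example: a continuous-time system with poles at 1 and 10 rad/s has
# features at 10**0 and 10**1; with the default periphery of one decade the
# returned range is logspace(-1, 2), i.e. 0.1 to 100 rad/s.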
#
# Utility functions to create nice looking labels (KLD 5/23/11)
#
def get_pow1000(num):
"""Determine exponent for which significand of a number is within the
range [1, 1000).
"""
# Based on algorithm from http://www.mail-archive.com/
# [email protected]/msg14433.html, accessed 2010/11/7
# by Jason Heeris 2009/11/18
from decimal import Decimal
from math import floor
dnum = Decimal(str(num))
if dnum == 0:
return 0
elif dnum < 0:
dnum = -dnum
return int(floor(dnum.log10() / 3))
def gen_prefix(pow1000):
"""Return the SI prefix for a power of 1000.
"""
# Prefixes according to Table 5 of [BIPM 2006] (excluding hecto,
# deca, deci, and centi).
if pow1000 < -8 or pow1000 > 8:
raise ValueError(
"Value is out of the range covered by the SI prefixes.")
return ['Y', # yotta (10^24)
'Z', # zetta (10^21)
'E', # exa (10^18)
'P', # peta (10^15)
'T', # tera (10^12)
'G', # giga (10^9)
'M', # mega (10^6)
'k', # kilo (10^3)
'', # (10^0)
'm', # milli (10^-3)
r'$\mu$', # micro (10^-6)
'n', # nano (10^-9)
'p', # pico (10^-12)
'f', # femto (10^-15)
'a', # atto (10^-18)
'z', # zepto (10^-21)
'y'][8 - pow1000] # yocto (10^-24)
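# Worked example: for f = 2.5e-5 Hz, get_pow1000(f) returns -2 (since
# 2.5e-5 = 25 * 1000**-2) and gen_prefix(-2) returns the micro prefix, so a
# point at that frequency would be labelled "25 $\mu$Hz".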
def find_nearest_omega(omega_list, omega):
omega_list = np.asarray(omega_list)
return omega_list[(np.abs(omega_list - omega)).argmin()]
# Function aliases
bode = bode_plot
nyquist = nyquist_plot
gangof4 = gangof4_plot
| bsd-3-clause |
vincentchoqueuse/parametrix | examples/ex_communication_SER_MC.py | 1 | 1316 | from parametrix.monte_carlo.classifiers import MC_Simulations_SER
from parametrix.communication.signal_models import M_AWGN
from parametrix.communication.classifiers import C_symbols_AWGN_ML
from parametrix.communication.statistics import S_AWGN_SER
import matplotlib.pyplot as plt
import numpy as np
""" This scripts shows the evolution of the Symbol Error Rate versus SNR per bits for several modulation typewith different model order. The experimental SERs are also compared with the theoretical ones.
Several modulation types are available:
* PAM,
* PSK,
* QAM.
"""
#show signal scatterplot for illustration
modulation_type="PAM"
signal=M_AWGN(modulation_type,4,N=1000)
signal.plot()
print("--- Monte Carlo Simulations ---")
plt.figure()
for M in [2,4,16]:
#create signal
signal=M_AWGN(modulation_type,M,N=10000)
#create detector
detector=C_symbols_AWGN_ML(modulation_type,M,name="%s %d" %(modulation_type,M))
#create statistic
SER=S_AWGN_SER(name="%s %d" %(modulation_type,M))
#perform monte carlo simulation
mc=MC_Simulations_SER("SNRb",np.arange(0.,25,2),detector,statistic_list=SER)
mc.hold_on=True #keep the figure
mc.trials(signal,nb_trials=1,verbose=1,plot=1)
plt.ylim([0.000001,1])
plt.legend(loc=3)
plt.show()
| bsd-3-clause |
costypetrisor/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
Odingod/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 19 | 1304 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/test_multilevel.py | 7 | 92692 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
import datetime
import itertools
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
from pandas.types.common import is_float_dtype, is_integer_dtype
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, product as
cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'], inplace=True)
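        # Fixture summary: self.frame is a 10x3 DataFrame on a two-level
        # ('first', 'second') MultiIndex; self.series is an 8-element Series
        # on a two-level tuple index with one NaN; self.ymd is a time frame
        # summed by (year, month, day) with int64 index levels.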
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
tm._skip_if_no_pytz()
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3,
tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# GH 7112
import pytz
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
self.assert_index_equal(result, expected)
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
self.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1),
idx2.append(idx2)])
self.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv3)
self.assert_index_equal(result, expected)
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')] +
expected_tuples), None)
self.assert_index_equal(result, expected)
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assertIsInstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']), np.array(
['x', 'y', 'x', 'y'])])
tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(level='month').transform(
np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = self.round_trip_pickle(frame)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEqual(result.index.names, self.frame.index.names)
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3, 4)
for gen, extra in [([1., 3., 2., 5.], 4.), ([1, 3, 2, 5], 4),
([Timestamp('20130101'), Timestamp('20130103'),
Timestamp('20130102'), Timestamp('20130105')],
Timestamp('20130104')),
(['1one', '3one', '2one', '5one'], '4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,
DataFrame('world', index=list('def'),
columns=MultiIndex.from_tuples(
[('red', extra)]))], axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of read
self.assertEqual(str(df2).splitlines()[0].split(), ['red'])
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red', extra)] = 'world'
result = result.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assertTrue(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
# TODO(wesm): unused?
# result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEqual(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assertTrue(isnull(s.values[42:65]).all())
self.assertTrue(notnull(s.values[:42]).all())
self.assertTrue(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assertTrue(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assertTrue((cp.values[:4] == 0).all())
self.assertTrue((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
# ---------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
sliced_a1 = df['A', '1']
sliced_a2 = df['A', '2']
sliced_b1 = df['B', '1']
assert_series_equal(sliced_a1, sliced_b1, check_names=False)
assert_series_equal(sliced_a2, sliced_b1, check_names=False)
self.assertEqual(sliced_a1.name, ('A', '1'))
self.assertEqual(sliced_a2.name, ('A', '2'))
self.assertEqual(sliced_b1.name, ('B', '1'))
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
        # missing values in returned index should be preserved
acc = [
('a', 'abcde', 1),
('b', 'bbcde', 2),
('y', 'yzcde', 25),
('z', 'xbcde', 24),
('z', None, 26),
('z', 'zbcde', 25),
('z', 'ybcde', 26),
]
df = DataFrame(acc,
columns=['a1', 'a2', 'cnt']).set_index(['a1', 'a2'])
expected = DataFrame({'cnt': [24, 26, 25, 26]}, index=Index(
['xbcde', np.nan, 'zbcde', 'ybcde'], name='a2'))
result = df.xs('z', level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1,
0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'), (
'p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assertTrue((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assertTrue((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assertIsInstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEqual(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEqual(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
# preserve names
self.assertEqual(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sortlevel(0)
self.assertTrue((result.dtypes.values == df.dtypes.values).all())
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
        tuples = list(cart_product(['foo', 'bar'], [10, 20], [1.0, 1.1]))
index = MultiIndex.from_tuples(tuples, names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assertTrue(is_integer_dtype(deleveled['prm1']))
self.assertTrue(is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEqual(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assertIsInstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assertIsInstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
tm.assert_index_equal(result.columns,
pd.Index(['A', 'B', 'C'], name='exp'))
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'], ['one', 'two',
'three', 'four']],
labels=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with assertRaisesRegexp(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with assertRaisesRegexp(IndexError, "not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked.unstack()
# test that ints work
self.ymd.astype(int).unstack()
# test that int32 work
self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
        index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
                                        (1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
# GH10417
def check(left, right):
assert_series_equal(left, right)
self.assertFalse(left.index.is_unique)
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
        mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
                        labels=[np.tile(np.arange(2).repeat(3), 2),
                                np.tile(np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
        mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']],
                        labels=[np.tile(np.arange(2).repeat(3), 2),
                                np.tile([0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
        mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
                        labels=[np.tile(np.arange(2).repeat(3), 2),
                                np.repeat([1, 0, 1], [3, 6, 3]),
                                np.tile([0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
result = df['foo'].stack()
assert_series_equal(stacked['foo'], result, check_names=False)
self.assertIs(result.name, None)
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive', 'activ', 'activ',
'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEqual(unstacked.index.name, 'first')
self.assertEqual(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEqual(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEqual(unstacked.columns.names, expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEqual(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with assertRaisesRegexp(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with assertRaisesRegexp(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with assertRaisesRegexp(IndexError, "not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'],
freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02',
'2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10',
'2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(
['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU').mean()
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').mean().stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'], 'B': ['b1', 'b2'], 'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
        df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
                              [0, 0, 1, 1, 0, 0, 1, 1],
                              [0, 1, 0, 1, 0, 1, 0, 1]],
                       columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
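        # the full cross-product of the five index levels is vastly larger
        # than the 1000 observed rows, so a naive dense unstack would exhaust
        # memory; only the observed keys should be materialized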
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
        # just running without raising is sufficient here
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEqual(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]],
names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
        midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
                                       ('f2', 's1'), ('f2', 's2'),
                                       ('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assertTrue((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assertFalse(np.isnan(joined.values).all())
        # TODO: what should join do with names?
        assert_frame_equal(joined, expected, check_names=False)
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel()
swapped2 = self.frame['A'].swaplevel(0)
swapped3 = self.frame['A'].swaplevel(0, 1)
swapped4 = self.frame['A'].swaplevel('first', 'second')
self.assertFalse(swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
assert_series_equal(swapped, swapped3)
assert_series_equal(swapped, swapped4)
back = swapped.swaplevel()
back2 = swapped.swaplevel(0)
back3 = swapped.swaplevel(0, 1)
back4 = swapped.swaplevel('second', 'first')
self.assertTrue(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
assert_series_equal(back, back3)
assert_series_equal(back, back4)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2})
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
for result in (panel.swaplevel(axis='major'),
panel.swaplevel(0, axis='major'),
panel.swaplevel(0, 1, axis='major')):
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assertIsInstance(df.columns, MultiIndex)
self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
        x = Series(data=[1, 2, 3],
                   index=MultiIndex.from_tuples([("A", 1), ("A", 2),
                                                 ("B", 3)]))
        y = Series(data=[4, 5, 6],
                   index=MultiIndex.from_tuples([("Z", 1), ("Z", 2),
                                                 ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
self.assertTrue(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
self.assertFalse(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]])
self.assertFalse(index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assertTrue((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
        except com.SettingWithCopyError:
            pass
self.assertTrue((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'b')
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'a')
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS, lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
            # compare the groupby aggregation with the level-wise reduction
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_index_equal(leftside._get_axis(axis), level_index)
self.assert_index_equal(rightside._get_axis(axis), level_index)
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10), np.tile(
np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
        # TODO: groupby with level_values drops names
        assert_frame_equal(result, expected, check_names=False)
self.assertEqual(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
        index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
                                        ('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEqual(result.index.names, self.ymd.index.names[1:])
self.assertEqual(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEqual(result.index.name, self.ymd.index.names[2])
self.assertEqual(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEqual(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
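        # eight levels with 500 labels each give 500 ** 8 possible
        # combinations, which overflows an int64 flat group index, so unstack
        # has to take the compressed (observed-codes) path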
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)] +
[labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
        # in theory this should insert into the lexsorted space
self.frame.ix[('bar', 'three'), 'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar', 'three'), 'B'], 0)
# ---------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assertTrue((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
# ---------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
        index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
                                    ('foo', 'qux', 0)], [0, 1]],
                           labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
        index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
                                    ('foo', 'qux')], [0, 1]],
                           labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result, check_names=False)
assert_frame_equal(df1, df2)
self.assertEqual(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_level_nonunique_datetime(self):
# GH 12701
idx = pd.Index([2, 3, 4, 4, 5], name='id')
idxdt = pd.to_datetime(['201603231400',
'201603231500',
'201603231600',
'201603231600',
'201603231700'])
df = DataFrame(np.arange(10).reshape(5, 2),
columns=list('ab'), index=idx)
df['tstamp'] = idxdt
df = df.set_index('tstamp', append=True)
ts = pd.Timestamp('201603231600')
self.assertFalse(df.index.is_unique)
result = df.drop(ts, level='tstamp')
expected = df.loc[idx != 4]
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
        mix = MultiIndex.from_tuples([('1a', '2a'), ('1a', '2b'),
                                      ('1a', '2c')])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assertTrue(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assertTrue((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assertTrue((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
        s1 = Series([1, 2, 3, 4],
                    index=MultiIndex.from_tuples([(1, 2), (1, 3),
                                                  (2, 2), (2, 4)]))
        s2 = Series([1, 2, 3, 4],
                    index=MultiIndex.from_tuples([(1, 2), (1, 3),
                                                  (3, 2), (3, 4)]))
s3 = Series()
# it works!
DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
        columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'),
                                          ('Colorado', 'Green')])
        index = MultiIndex.from_tuples([('a', 1), ('a', 2),
                                        ('b', 1), ('b', 2)])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.ix[:, 1]
exp = frame.loc[:, ('Ohio', 'Red')]
tm.assertIsInstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.ix[ix, "C"] = '_'
self.assertTrue((df.xs((1, 1))['C'] == '_').all())
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
        old_cutoff = _index._SIZE_CUTOFF
        _index._SIZE_CUTOFF = 20000
        try:
            s = Series(np.arange(n),
                       MultiIndex.from_arrays((["a"] * n, np.arange(n))))
            # it works!
            self.assertEqual(s[("a", 5)], 5)
            self.assertEqual(s[("a", 6)], 6)
            self.assertEqual(s[("a", 7)], 7)
        finally:
            # restore the module-level cutoff even if an assertion fails
            _index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
        lst = list(df.index)
        lst[0] = ("faz", "boo")
        df.index = lst
        repr(df)
        # this travels an improper code path
        lst[0] = ["faz", "boo"]
        df.index = lst
        repr(df)
def test_tuples_have_na(self):
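        # a label (code) of -1 in a MultiIndex level marks a missing value,
        # so position 4 below should surface as NaN in the first level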
        index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
                           labels=[[1, 1, 1, 1, -1, 0, 0, 0],
                                   [0, 1, 2, 3, 0, 1, 2, 3]])
self.assertTrue(isnull(index[4][0]))
self.assertTrue(isnull(index.values[4][0]))
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
self.assertEqual(len(result), 3)
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['bah', 'bam', 3.0, 3],
['bah', 'bam', 4.0, 4], ['foo', 'bar', 5.0, 5],
['bah', 'bam', 6.0, 6]],
columns=list('ABCD'))
df = df.set_index(['A', 'B'])
df = df.sortlevel(0)
expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['foo', 'bar', 5.0, 5]],
columns=list('ABCD')).set_index(['A', 'B'])
result = df.loc[('foo', 'bar')]
assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))
expected = np.array(
[False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
# deprecate take_last
expected = np.array([True, False, False, False, False, False])
with tm.assert_produces_warning(FutureWarning):
duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
with tm.assert_produces_warning(FutureWarning):
tm.assert_index_equal(
idx.drop_duplicates(take_last=True), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(
['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'
] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M',
tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00',
'2013-04-03 9:00'], tz='Asia/Tokyo')
self.assert_index_equal(idx.levels[0], expected1)
self.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product(
[date1, date2, date3], [date1, date2, date3]):
index = pd.MultiIndex.from_product([[d1], [d2]])
self.assertIsInstance(index.levels[0], pd.DatetimeIndex)
self.assertIsInstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame(
{'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], name='datetime')
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
self.assert_index_equal(df.index.levels[0], expected)
self.assert_index_equal(df.index.levels[1],
pd.Index(['a', 'b'], name='label'))
df = df.swaplevel(0, 1)
self.assert_index_equal(df.index.levels[0],
pd.Index(['a', 'b'], name='label'))
self.assert_index_equal(df.index.levels[1], expected)
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-01 09:00', '2012-04-02 09:00',
'2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
self.assert_index_equal(df.index.levels[0], expected1)
self.assert_index_equal(df.index.levels[1], expected2)
self.assert_index_equal(df.index.levels[2], idx3)
# GH 7092
self.assert_index_equal(df.index.get_level_values(0), idx1)
self.assert_index_equal(df.index.get_level_values(1), idx2)
self.assert_index_equal(df.index.get_level_values(2), idx3)
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz,
name='idx1')
idx2 = pd.Index(range(5), name='idx2', dtype='int64')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS',
tz='Europe/Paris', name='idx3')
idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3])
df = pd.DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: pd.Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(
lambda d: pd.Timestamp(d, tz='Europe/Paris'))
assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = pd.MultiIndex.from_product([['a', 'b'], pd.date_range(
'20130101', periods=3, tz=tz)])
df = pd.DataFrame(
np.arange(6, dtype='int64').reshape(
6, 1), columns=['a'], index=idx)
expected = pd.DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(
lambda d: pd.Timestamp(d, freq='D', tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = pd.MultiIndex.from_product([pd.period_range('20130101',
periods=3, freq='M'),
['a', 'b', 'c']],
names=['month', 'feature'])
df = pd.DataFrame(np.arange(9, dtype='int64')
.reshape(-1, 1),
index=idx, columns=['a'])
expected = pd.DataFrame({
'month': ([pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3),
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')
}, columns=['month', 'feature', 'a'])
assert_frame_equal(df.reset_index(), expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='A')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
self.assert_index_equal(df.index.levels[0], expected1)
self.assert_index_equal(df.index.levels[1], expected2)
self.assert_index_equal(df.index.levels[2], idx3)
self.assert_index_equal(df.index.get_level_values(0), idx1)
self.assert_index_equal(df.index.get_level_values(1), idx2)
self.assert_index_equal(df.index.get_level_values(2), idx3)
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = pd.MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = pd.Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data), )
def test_iloc_mi(self):
# GH 13797
# Test if iloc can handle integer locations in MultiIndexed DataFrame
data = [
['str00', 'str01'],
['str10', 'str11'],
            ['str20', 'str21'],
['str30', 'str31'],
['str40', 'str41']
]
mi = pd.MultiIndex.from_tuples(
[('CC', 'A'),
('CC', 'B'),
('CC', 'B'),
('BB', 'a'),
('BB', 'b')
])
expected = pd.DataFrame(data)
df_mi = pd.DataFrame(data, index=mi)
result = pd.DataFrame([[df_mi.iloc[r, c] for c in range(2)]
for r in range(5)])
assert_frame_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
jakobworldpeace/scikit-learn | sklearn/discriminant_analysis.py | 27 | 26804 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
            # rescale back to the original feature scale, since the
            # Ledoit-Wolf estimate above was computed on standardized data
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string or float type')
return s
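# Illustrative sketch (editorial addition, not part of scikit-learn): a
# minimal, never-called helper showing how the shrinkage modes accepted by
# _cov differ. The helper name and the random data are hypothetical.
def _cov_shrinkage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    s_empirical = _cov(X)                      # plain empirical covariance
    s_ledoit_wolf = _cov(X, shrinkage='auto')  # Ledoit-Wolf shrinkage
    s_fixed = _cov(X, shrinkage=0.3)           # fixed blend towards the target
    # each estimate is a symmetric (n_features, n_features) matrix
    return s_empirical, s_ledoit_wolf, s_fixed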
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
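# Illustrative sketch (editorial addition, not part of scikit-learn): how the
# two private helpers above combine on a toy two-class problem. The helper
# name and the data are hypothetical and nothing in the library calls this.
def _class_stats_sketch():
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([0, 0, 1, 1])
    means = _class_means(X, y)      # shape (n_classes, n_features)
    # pooled within-class covariance, weighted by the class priors
    pooled = _class_cov(X, y, priors=np.array([0.5, 0.5]))
    return means, pooled            # pooled is (n_features, n_features)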
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
        Number of components (<= n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
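        Examples
        --------
        Illustrative sketch (an editorial addition; it reuses the toy data
        from the class docstring, where two classes give a single component):
        >>> import numpy as np
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> y = np.array([1, 1, 1, 2, 2, 2])
        >>> lda = LinearDiscriminantAnalysis(solver='svd').fit(X, y)
        >>> lda.transform(X).shape
        (6, 1)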
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
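        Examples
        --------
        Illustrative sketch (an editorial addition; per-row probabilities sum
        to one):
        >>> import numpy as np
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> y = np.array([1, 1, 1, 2, 2, 2])
        >>> clf = LinearDiscriminantAnalysis().fit(X, y)
        >>> np.allclose(clf.predict_proba(X).sum(axis=1), 1.0)
        True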
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
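        Examples
        --------
        Illustrative sketch (an editorial addition; with two classes the
        result is the 1-D log likelihood ratio described above):
        >>> import numpy as np
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> y = np.array([1, 1, 1, 2, 2, 2])
        >>> qda = QuadraticDiscriminantAnalysis().fit(X, y)
        >>> qda.decision_function([[-0.8, -1]]).shape
        (1,)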
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
rs2/pandas | pandas/tests/tslibs/test_liboffsets.py | 3 | 5095 | """
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from pandas._libs.tslibs.ccalendar import get_firstbday, get_lastbday
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import roll_qtrday
from pandas import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_last_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_get_last_bday(dt, exp_week_day, exp_last_day):
assert dt.weekday() == exp_week_day
assert get_lastbday(dt.year, dt.month) == exp_last_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_get_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert get_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shift_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(1, "start", Timestamp("1929-06-01")),
(-3, "end", Timestamp("1929-02-28")),
(25, None, Timestamp("1931-06-5")),
(-1, 31, Timestamp("1929-04-30")),
],
)
def test_shift_month_ts(months, day_opt, expected):
ts = Timestamp("1929-05-05")
assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected
def test_shift_month_error():
dt = datetime(2017, 11, 15)
day_opt = "this should raise"
with pytest.raises(ValueError, match=day_opt):
liboffsets.shift_month(dt, 3, day_opt=day_opt)
@pytest.mark.parametrize(
"other,expected",
[
# Before March 1.
(datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}),
# After March 1.
(Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [2, -7, 0])
def test_roll_qtrday_year(other, expected, n):
month = 3
day_opt = "start" # `other` will be compared to March 1.
assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n]
@pytest.mark.parametrize(
"other,expected",
[
# Before June 30.
(datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}),
# After June 30.
(Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}),
],
)
@pytest.mark.parametrize("n", [5, -7, 0])
def test_roll_qtrday_year2(other, expected, n):
month = 6
day_opt = "end" # `other` will be compared to June 30.
assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n]
def test_get_day_of_month_error():
# get_day_of_month is not directly exposed.
# We test it via roll_qtrday.
dt = datetime(2017, 11, 15)
day_opt = "foo"
with pytest.raises(ValueError, match=day_opt):
# To hit the raising case we need month == dt.month and n > 0.
roll_qtrday(dt, n=3, month=11, day_opt=day_opt, modby=12)
@pytest.mark.parametrize(
"month",
    # The two months exercise both orderings of (other.month % 3)
    # relative to (month % 3).
    [3, 5],
)
@pytest.mark.parametrize("n", [4, -3])
def test_roll_qtr_day_not_mod_unequal(day_opt, month, n):
expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}}
other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday.
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n]
@pytest.mark.parametrize(
"other,month,exp_dict",
[
# Monday.
(datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}),
# Saturday.
(
Timestamp(2072, 10, 1, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1, "business_start": 1}},
),
# First business day.
(
Timestamp(2072, 10, 3, 6, 17, 18),
4,
{2: {"end": 1, "business_end": 1}, -1: {"start": 0}},
),
],
)
@pytest.mark.parametrize("n", [2, -1])
def test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt):
# All cases have (other.month % 3) == (month % 3).
expected = exp_dict.get(n, {}).get(day_opt, n)
assert roll_qtrday(other, n, month, day_opt, modby=3) == expected
@pytest.mark.parametrize(
"n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})]
)
@pytest.mark.parametrize("compare", [29, 1, 31])
def test_roll_convention(n, expected, compare):
assert liboffsets.roll_convention(29, n, compare) == expected[compare]
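# Extra illustrative case (an editorial addition, not part of the original
# suite): a month whose last calendar day falls on a Sunday, so the last
# business day is the preceding Friday.
def test_get_last_bday_sunday_month_end():
    dt = datetime(2017, 4, 30)
    assert dt.weekday() == 6 # Sunday
    assert get_lastbday(dt.year, dt.month) == 28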
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/tree/tree.py | 59 | 34839 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"presort-best": _splitter.PresortBestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predicted values.
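        Examples
        --------
        Illustrative sketch (an editorial addition; a classifier subclass is
        used for concreteness on a tiny two-sample training set):
        >>> clf = DecisionTreeClassifier().fit([[0, 0], [1, 1]], [0, 1])
        >>> clf.predict([[2., 2.]])
        array([1])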
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
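        Examples
        --------
        Illustrative sketch (an editorial addition; the importances themselves
        depend on the fitted tree, so only the shape is shown):
        >>> from sklearn.datasets import load_iris
        >>> iris = load_iris()
        >>> clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
        >>> clf.feature_importances_.shape
        (4,)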
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
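        Examples
        --------
        Illustrative sketch (an editorial addition; the exact probabilities
        depend on the leaf the query point falls into, hence the skip):
        >>> clf = DecisionTreeClassifier().fit([[0, 0], [1, 1]], [0, 1])
        >>> clf.predict_proba([[2., 2.]])  # doctest: +SKIP
        array([[ 0.,  1.]])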
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
rsignell-usgs/notebook | NEXRAD/THREDDS_NEXRAD.py | 1 | 7648 |
# coding: utf-8
# # Using Python to Access NEXRAD Level 2 Data from Unidata THREDDS Server
# This is a modified version of Ryan May's notebook here:
# http://nbviewer.jupyter.org/gist/dopplershift/356f2e14832e9b676207
#
# The TDS provides a mechanism to query for available data files, as well as access to the data as native volume files, through OPeNDAP, and via its own CDMRemote protocol. Since we're using Python, we can take advantage of Unidata's Siphon package, which provides an easy API for talking to THREDDS servers.
#
# Bookmark these resources for when you want to use Siphon later!
# + [latest Siphon documentation](http://siphon.readthedocs.org/en/latest/)
# + [Siphon github repo](https://github.com/Unidata/siphon)
# + [TDS documentation](http://www.unidata.ucar.edu/software/thredds/current/tds/TDS.html)
# ## Downloading the single latest volume
#
# Just a bit of initial set-up to use inline figures and quiet some warnings.
# In[1]:
import matplotlib
import warnings
warnings.filterwarnings("ignore", category=matplotlib.cbook.MatplotlibDeprecationWarning)
get_ipython().magic(u'matplotlib inline')
# First we'll create an instance of RadarServer to point to the appropriate radar server access URL.
# In[2]:
# The archive of data on S3 URL did not work for me, despite .edu domain
#url = 'http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/'
#Trying motherlode URL
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/'
from siphon.radarserver import RadarServer
rs = RadarServer(url)
# Next, we'll create a new query object to help request the data. Using the chaining methods, let's ask for the latest data at the radar KLVX (Louisville, KY). We see that when the query is represented as a string, it shows the encoded URL.
# In[3]:
from datetime import datetime, timedelta
query = rs.query()
query.stations('KLVX').time(datetime.utcnow())
# We can use the RadarServer instance to check our query, to make sure we have required parameters and that we have chosen valid station(s) and variable(s)
#
# In[4]:
rs.validate_query(query)
# Make the request, which returns an instance of TDSCatalog; this handles parsing the returned XML information.
# In[5]:
catalog = rs.get_catalog(query)
# We can look at the datasets on the catalog to see what data we found by the query. We find one volume in the return, since we asked for the volume nearest to a single time.
# In[6]:
catalog.datasets
# We can pull that dataset out of the dictionary and look at the available access URLs. We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).
# In[7]:
ds = list(catalog.datasets.values())[0]
ds.access_urls
# We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL.
# In[8]:
from siphon.cdmr import Dataset
data = Dataset(ds.access_urls['CdmRemote'])
# We define some helper functions to make working with the data easier. One takes the raw data and converts it to floating point values with the missing data points appropriately marked. The other helps with converting the polar coordinates (azimuth and range) to Cartesian (x and y).
# In[9]:
import numpy as np
def raw_to_masked_float(var, data):
# Values come back signed. If the _Unsigned attribute is set, we need to convert
    # from the range [-128, 127] to [0, 255].
if var._Unsigned:
data = data & 255
# Mask missing points
data = np.ma.array(data, mask=data==0)
# Convert to float using the scale and offset
return data * var.scale_factor + var.add_offset
def polar_to_cartesian(az, rng):
az_rad = np.deg2rad(az)[:, None]
x = rng * np.sin(az_rad)
y = rng * np.cos(az_rad)
return x, y
# The CDMRemote reader provides an interface that is almost identical to the usual python NetCDF interface. We pull out the variables we need for azimuth and range, as well as the data itself.
# In[10]:
sweep = 0
ref_var = data.variables['Reflectivity_HI']
ref_data = ref_var[sweep]
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
# Then convert the raw data to floating point values and the polar coordinates to Cartesian.
# In[11]:
ref = raw_to_masked_float(ref_var, ref_data)
x, y = polar_to_cartesian(az, rng)
# MetPy is a Python package for meteorology (Documentation: http://metpy.readthedocs.org and GitHub: http://github.com/MetPy/MetPy). We import MetPy and use it to get the colortable and value mapping information for the NWS Reflectivity data.
# In[12]:
from metpy.plots import ctables # For NWS colortable
ref_norm, ref_cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)
# Finally, we plot them up using matplotlib and cartopy. We create a helper function for making a map to keep things simpler later.
# In[13]:
import matplotlib.pyplot as plt
import cartopy
def new_map(fig, lon, lat):
# Create projection centered on the radar. This allows us to use x
# and y relative to the radar.
proj = cartopy.crs.LambertConformal(central_longitude=lon, central_latitude=lat)
# New axes with the specified projection
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add coastlines
ax.coastlines('50m', 'black', linewidth=2, zorder=2)
# Grab state borders
state_borders = cartopy.feature.NaturalEarthFeature(
category='cultural', name='admin_1_states_provinces_lines',
scale='50m', facecolor='none')
ax.add_feature(state_borders, edgecolor='black', linewidth=1, zorder=3)
return ax
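# As a quick sanity check (this cell is an editorial addition, not part of the original notebook), we can reuse the helper to plot the single latest KLVX volume prepared above; the station longitude/latitude attributes come from the CDMRemote dataset, as in the later cells.
# In[ ]:
fig = plt.figure(figsize=(10, 10))
ax = new_map(fig, data.StationLongitude, data.StationLatitude)
ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0);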
# ## Download a collection of historical data
# This time we'll make a query based on a longitude, latitude point and using a time range.
# In[14]:
# Our specified time
#dt = datetime(2012, 10, 29, 15) # Superstorm Sandy
#dt = datetime(2016, 6, 18, 1)
dt = datetime(2016, 6, 8, 18)
query = rs.query()
query.lonlat_point(-73.687, 41.175).time_range(dt, dt + timedelta(hours=1))
# The specified longitude, latitude are in NY and the TDS helpfully finds the closest station to that point. We can see that for this time range we obtained multiple datasets.
# In[15]:
cat = rs.get_catalog(query)
cat.datasets
# Grab the first dataset so that we can get the longitude and latitude of the station and make a map for plotting. We'll go ahead and specify some longitude and latitude bounds for the map.
# In[16]:
ds = list(cat.datasets.values())[0]
data = Dataset(ds.access_urls['CdmRemote'])
# Pull out the data of interest
sweep = 0
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref_var = data.variables['Reflectivity_HI']
# Convert data to float and coordinates to Cartesian
ref = raw_to_masked_float(ref_var, ref_var[sweep])
x, y = polar_to_cartesian(az, rng)
# Use the function to make a new map and plot a colormapped view of the data
# In[17]:
fig = plt.figure(figsize=(10, 10))
ax = new_map(fig, data.StationLongitude, data.StationLatitude)
# Set limits in lat/lon space
ax.set_extent([-77, -70, 38, 43])
# Add ocean and land background
ocean = cartopy.feature.NaturalEarthFeature('physical', 'ocean', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['water'])
land = cartopy.feature.NaturalEarthFeature('physical', 'land', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['land'])
ax.add_feature(ocean, zorder=-1)
ax.add_feature(land, zorder=-1)
ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0);
# In[ ]:
| mit |
arokem/scipy | scipy/integrate/odepack.py | 2 | 10805 | # Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
import numpy as np
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error).",
-8: "Run terminated (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0, tfirst=False):
"""
Integrate a system of ordinary differential equations.
.. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
differential equation.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t, ...) [or func(t, y, ...)]
where y can be a vector.
.. note:: By default, the required order of the first two arguments of
`func` are in the opposite order of the arguments in the system
definition function used by the `scipy.integrate.ode` class and
the function `scipy.integrate.solve_ivp`. To use a function with
the signature ``func(t, y, ...)``, the argument `tfirst` must be
set to ``True``.
Parameters
----------
func : callable(y, t, ...) or callable(t, y, ...)
Computes the derivative of y at t.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
This sequence must be monotonically increasing or monotonically
decreasing; repeated values are allowed.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t, ...) or callable(t, y, ...)
Gradient (Jacobian) of `func`.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
        If True, return a dictionary of optional outputs as the second output.
printmessg : bool, optional
Whether to print the convergence message
tfirst: bool, optional
If True, the first two arguments of `func` (and `Dfun`, if given)
        must be ``t, y`` instead of the default ``y, t``.
.. versionadded:: 1.1.0
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step
'tcur' vector with the value of t reached for each time step
(will always be at least as large as the input times)
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise
'lenrw' the length of the double work array required
'leniw' the length of integer work array required
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
        If either of these is not None, then the Jacobian is assumed to
        be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g., singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
solve_ivp : solve an initial value problem for a system of ODEs
ode : a more object-oriented integrator based on VODE
quad : for finding the area under a curve
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in Python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We will generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
dt = np.diff(t)
if not((dt >= 0).all() or (dt <= 0).all()):
raise ValueError("The values in t must be monotonically increasing "
"or monotonically decreasing; repeated values are "
"allowed.")
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords,
int(bool(tfirst)))
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
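# ---------------------------------------------------------------------------
# Illustrative sketch (not part of SciPy's public API): how the banded-Jacobian
# interface described in the docstring above can be used. The problem below
# (diffusion on a line with fixed end points) and all sizes are assumptions
# chosen only for demonstration; the helper is never called at import time.
def _example_banded_jacobian():
    """Integrate a 1-D diffusion problem using a tridiagonal Jacobian."""
    n = 50
    dx = 1.0 / (n - 1)
    def rhs(y, t):
        dydt = np.zeros_like(y)
        dydt[1:-1] = (y[:-2] - 2 * y[1:-1] + y[2:]) / dx**2
        return dydt
    def band_jac(y, t):
        # Banded storage: jac[i - j + mu, j] = d rhs_i / d y_j, with ml = mu = 1
        jac = np.zeros((3, n))
        jac[0, 2:] = 1.0 / dx**2      # upper diagonal
        jac[1, 1:-1] = -2.0 / dx**2   # main diagonal
        jac[2, :-2] = 1.0 / dx**2     # lower diagonal
        return jac
    y0 = np.exp(-100.0 * (np.linspace(0.0, 1.0, n) - 0.5) ** 2)
    t = np.linspace(0.0, 1e-3, 5)
    return odeint(rhs, y0, t, Dfun=band_jac, ml=1, mu=1)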
| bsd-3-clause |
maxlikely/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 9 | 4822 | from itertools import product
import numpy as np
from nose.tools import assert_true
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.todense(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
    # check that the barycenter weights for each sample sum to one
assert_array_almost_equal(np.sum(A.todense(), 1), np.ones((3, 1)))
pred = np.dot(A.todense(), X)
assert_less(np.linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).todense()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(np.linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(20), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 20]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
import warnings
from nose.tools import assert_raises
M = np.ones((10, 3))
with warnings.catch_warnings(record=True):
assert_raises(ValueError, manifold.locally_linear_embedding,
M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
dialounke/pylayers | pylayers/antprop/antssh.py | 1 | 8590 | #from pylayers.antprop.antenna import *
#from pylayers.antprop.antvsh import *
"""
.. currentmodule:: pylayers.antprop.antssh
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
from pylayers.antprop.spharm import *
import numpy as np
import scipy as sp
import pdb
import sys
import matplotlib.pyplot as plt
import doctest
def SSHFunc(L, theta,phi):
""" ssh function
Parameters
----------
L : integer,
spherical harmonics order
theta: numpy array (1,nth)
phi: numpy array (1,nph)
Returns
-------
Y : np.array
((1+L)*(2+L)/2,nth*nph)
indx : np.array
Notes
-----
Compute the spherical harmonic functions for the order L
    return a spherical matrix ((1+L)*(2+L)/2,nth*nph) and the index (l,m) of the spherical harmonics
"""
l = np.arange(0,1+L).reshape(1,(1+L))
m = np.arange(0,1+L).reshape(((1+L),1))
    # normalize the associated Legendre polynomials
    NRM = np.sqrt((2*l+1)*factorial(l-abs(m))/(4*np.pi*factorial(l+abs(m))))
    NRM = NRM.reshape((1+L,1+L,1))
    # compute the associated Legendre polynomial part Plm(cos(theta))
ll = l.reshape(1,(1+L),1)
mm = m.reshape(((1+L),1,1))
x = np.cos(theta).reshape((1,1, len(theta)))
PLM = sp.special.lpmv(mm,ll,x)
# Normalize
NPLM = NRM*PLM
NPLM = NPLM.reshape((1+L,1+L,len(theta),1))
# compute the exp(j*m*phi) part
PHI = phi.reshape((1,len(phi)))
mm = m.reshape(((1+L),1))
EXP = np.exp(1j*mm*PHI)
EXP = EXP.reshape((1+L,1,1,len(phi)))
# Compute Y : the spherical harmonics matrix and reshaping it
Yi= NPLM*EXP
Yi = Yi.reshape(((1+L)**2,len(theta)*len(phi)))
#~ Y = Yi
nzero_rows = Yi.any(axis = 1)
Y = Yi[nzero_rows] # eliminating the undefined functions (Y01)
ll = (l*np.ones((1+L,1))).reshape(((1+L)**2))
mm = (m*np.ones((1,1+L))).reshape(((1+L)**2))
# spherical harmonics index
#~ indx = np.array([ll[nzero_rows],mm[nzero_rows]]).T
indx = np.array([ll[nzero_rows],mm[nzero_rows]]).T
Y2 = ((-1)**indx[1+L:,1]).reshape((len(indx[1+L:,1]),1))*np.conj(Y[1+L:,:])
Y = np.append(Y,Y2, axis = 0)
indx2 = np.array([indx[1+L:,0],(-1)*indx[1+L:,1]]).T
indx = np.append(indx,indx2, axis = 0)
return Y, indx
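# --- usage sketch (illustrative, not part of the original module) -----------
# Evaluate the scalar spherical harmonics matrix on a coarse angular grid and
# check the returned shapes. The order and grid sizes below are arbitrary
# assumptions chosen only to keep the example fast.
def _example_sshfunc(L=3, nth=10, nph=20):
    theta = np.linspace(0.01, np.pi - 0.01, nth)
    phi = np.linspace(0., 2 * np.pi, nph, endpoint=False)
    Y, indx = SSHFunc(L, theta, phi)
    # one column per direction of the (theta, phi) grid
    assert Y.shape[1] == nth * nph
    # one (l, m) index pair per row of Y
    assert indx.shape[0] == Y.shape[0]
    return Y, indx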
def SSHFunc2(L, theta,phi):
""" ssh function version 2
Parameters
----------
L : integer,
spherical harmonics order
theta: numpy array(1,ndir)
phi: numpy array(1,ndir)
Notes
-----
theta and phi should have the same dimensions which represents the rays
Compute the spherical harmonic functions for the order L
    return a spherical matrix ((1+L)*(2+L)/2,ndir) and the index (l,m) of the spherical harmonics
"""
nray = len(theta)
l = np.arange(0,1+L).reshape(1,(1+L))
m = np.arange(0,1+L).reshape(((1+L),1))
    # normalize the associated Legendre polynomials
    NRM = np.sqrt((2*l+1)*factorial(l-abs(m))/(4*np.pi*factorial(l+abs(m))))
    NRM = NRM.reshape((1+L,1+L,1))
    # compute the associated Legendre polynomial part Plm(cos(theta))
ll = l.reshape(1,(1+L),1)
mm = m.reshape(((1+L),1,1))
x = np.cos(theta).reshape((1,1, nray))
PLM = sp.special.lpmv(mm,ll,x)
# Normalize
NPLM = NRM*PLM
NPLM = NPLM.reshape((1+L,1+L,nray))
# compute the exp(j*m*phi) part
PHI = phi.reshape((1,nray))
mm = m.reshape(((1+L),1))
EXP = np.exp(1j*mm*PHI)
EXP = EXP.reshape((1+L,1,nray))
# Compute Y : the spherical harmonics matrix and reshaping it
Yi= NPLM*EXP
Yi = Yi.reshape(((1+L)**2,nray))
#~ Y = Yi
nzero_rows = Yi.any(axis = 1)
Y = Yi[nzero_rows] # eliminating the non defined functions (Y01)
ll = (l*np.ones((1+L,1))).reshape(((1+L)**2))
mm = (m*np.ones((1,1+L))).reshape(((1+L)**2))
# spherical harmonics index
#~ indx = np.array([ll[nzero_rows],mm[nzero_rows]]).T
indx = np.array([ll[nzero_rows],mm[nzero_rows]]).T
Y2 = ((-1)**indx[1+L:,1]).reshape((len(indx[1+L:,1]),1))*np.conj(Y[1+L:,:])
Y = np.append(Y,Y2, axis = 0)
indx2 = np.array([indx[1+L:,0],(-1)*indx[1+L:,1]]).T
indx = np.append(indx,indx2, axis = 0)
return Y, indx
def SphereToCart (theta, phi, eth, eph, bfreq ):
""" Spherical to Cartesian
Parameters
----------
theta :
phi :
eth :
eph :
bfreq: boolean
indicate if the conversion is done for all frequencies or only one.
"""
if bfreq == False:
PHI = phi.reshape((1,len(phi)))
THETA = theta.reshape((len(theta),1))
        ec = np.ndarray(shape = (3, len(theta),len(phi)) , dtype = complex )
else:
PHI = phi.reshape((1,len(phi),1))
THETA = theta.reshape((len(theta),1,1))
ec = np.ndarray(shape = (3, len(theta),len(phi),eth.shape[-1] ) , dtype = complex )
ec[0] = np.cos(THETA)*np.cos(PHI)*eth -np.sin(PHI)*eph
ec[1] = np.cos(THETA)*np.sin(PHI)*eth +np.cos(PHI)*eph
ec[2] = -np.sin(THETA)*eth
return ec
def CartToSphere (theta, phi,ex,ey,ez, bfreq=True, pattern = True):
""" Convert from Cartesian to Spherical
Parameters
----------
theta
phi
ex
ey
ez
bfreq : boolean
pattern : boolean
Convert from cartesian to spherical coordinates
    bfreq : boolean parameter to indicate if the conversion is done for all frequencies or only one.
"""
nray = len(theta)
if bfreq == False:
es = np.ndarray(shape = (2, nray) , dtype = complex )
else:
es = np.ndarray(shape = (2, ex.shape[0], nray) , dtype = complex )
es[0] = np.cos(theta)*np.cos(phi)*ex + np.cos(theta)*np.sin(phi)*ey -np.sin(theta)*ez
es[1] = -np.sin(phi)*ex +np.cos(phi)*ey
return es[0],es[1]
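# --- usage sketch (illustrative, not part of the original module) -----------
# Sanity check of CartToSphere on a purely z-directed field: with ex = ey = 0
# and ez = 1 the spherical components reduce to E_theta = -sin(theta) and
# E_phi = 0. The number of rays below is an arbitrary assumption.
def _example_cart_to_sphere(nray=8):
    theta = np.linspace(0.1, np.pi - 0.1, nray)
    phi = np.linspace(0., 2 * np.pi, nray, endpoint=False)
    ex = np.zeros(nray)
    ey = np.zeros(nray)
    ez = np.ones(nray)
    eth, eph = CartToSphere(theta, phi, ex, ey, ez, bfreq=False)
    assert np.allclose(eth, -np.sin(theta))
    assert np.allclose(eph, 0.)
    return eth, eph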
def ssh(A,L= 20,dsf=1):
"""
Parameters
----------
A : antenna
dsf : int
down sampling factor 'default 1'
Summary
-------
This function calculates the Scalar Spherical Harmonics coefficients
m : phi longitude
l : theta latitude
Antenna pattern are stored (f theta phi)
Coeff are stored with this order (f , l , m )
"""
th = A.theta[::dsf]
ph = A.phi[::dsf]
nth = len(th)
nph = len(ph)
nf = A.nf
if (nph % 2) == 1:
mdab = min(nth, (nph + 1) / 2)
else:
mdab = min(nth, nph / 2)
ndab = nth
Etheta = A.Ft[::dsf,::dsf,:]
Ephi = A.Fp[::dsf,::dsf,:]
# compute the spherical harmonics functions at the order L
Y,ssh_index = SSHFunc(L,th,ph)
# Compute the pseudo inverse of Y
Ypinv = sp.linalg.pinv(Y)
# convert the field from spherical to cartesian coordinates system
Ex,Ey,Ez = SphereToCart (th, ph, Etheta, Ephi, True)
#
Ex = Ex.reshape((nf,nth*nph))
Ey = Ey.reshape((nf,nth*nph))
Ez = Ez.reshape((nf,nth*nph))
cx = np.dot(Ex,Ypinv)
cy = np.dot(Ey,Ypinv)
cz = np.dot(Ez,Ypinv)
lmax = L
Cx = SCoeff(typ='s2',fmin=A.fGHz[0],fmax=A.fGHz[-1],lmax=lmax,data=cx,ind=ssh_index)
Cy = SCoeff(typ='s2',fmin=A.fGHz[0],fmax=A.fGHz[-1],lmax=lmax,data=cy,ind=ssh_index)
Cz = SCoeff(typ='s2',fmin=A.fGHz[0],fmax=A.fGHz[-1],lmax=lmax,data=cz,ind=ssh_index)
A.S = SSHCoeff(Cx,Cy,Cz)
return(A)
def sshs(G,fGHz,th,ph,L= 20):
"""scalar spherical harmonics transform
Parameters
----------
G : antenna gain (f,th,phi)
fGHz : np.array
th : np.array
ph : np.array
Summary
-------
This function calculates the Scalar Spherical Harmonics coefficients
m : phi longitude
l : theta latitude
Antenna pattern are stored (f theta phi)
Coeff are stored with this order (f , l , m )
"""
    nth = len(th)
    nph = len(ph)
    nf = len(fGHz)
if (nph % 2) == 1:
mdab = min(nth, (nph + 1) / 2)
else:
mdab = min(nth, nph / 2)
ndab = nth
#Etheta = A.Ft[::dsf,::dsf,:]
#Ephi = A.Fp[::dsf,::dsf,:]
# compute the spherical harmonics functions at the order L
Y,ssh_index = SSHFunc(L,th,ph)
# Compute the pseudo inverse of Y
Ypinv = sp.linalg.pinv(Y)
# convert the field from spherical to cartesian coordinates system
#Ex,Ey,Ez = SphereToCart (th, ph, Etheta, Ephi, True)
#
G = G.reshape((nf,nth*nph))
cg = np.dot(G,Ypinv)
lmax = L
Cg = SCoeff(typ='s2',fmin=fGHz[0],fmax=fGHz[-1],lmax=lmax,data=cg,ind=ssh_index)
return(Cg)
if (__name__=="__main__"):
doctest.testmod()
| mit |
migueln/enrico | enrico/scan.py | 1 | 5911 | """
Make a profile likelihood for each free parameters of the source of interest
begun November 2013
"""
import enrico.constants as cst
import RunGTlike
import ROOT
import numpy,os,string,array
from enrico import Loggin
from enrico import utils
import matplotlib
matplotlib.rc('text', usetex=True)
import matplotlib.pyplot as plt
def MakeScan(Fit,spectrum,par,bmin,bmax,opt,N=100):
Param = numpy.zeros(N)
loglike = numpy.zeros(N)
for i in xrange(N):
Param[i] = bmin + (bmax-bmin)*i/(N-1.)
spectrum.getParam(par).setValue(Param[i])
# spectrum.getParam(par).setFree(0)
# loglike[i] = Fit.fit(0,covar=False,optimizer=opt)
# Fit.optimize(0)
Fit.logLike.syncParams()
loglike[i] = -Fit.logLike.value()
return Param,loglike
def Scan(config):
ROOT.gROOT.SetBatch(ROOT.kTRUE)
cres = ROOT.TCanvas("Scan")
config["Spectrum"]["FitsGeneration"] = "no"
FitRunner,Fit = RunGTlike.GenAnalysisObjects(config)
spectrum = Fit[FitRunner.obs.srcname].funcs['Spectrum']
ParName = spectrum.paramNames
Fit.fit(0,covar=False,optimizer=config['fitting']['optimizer'])
for par in ParName : #Loop over the parameters and get value, error and scale
if spectrum.getParam(par).isFree():
print "Scan for parameter ",par
ParValue = spectrum.getParam(par).value()
ParError = spectrum.getParam(par).error()
bmin,bmax = spectrum.getParam(par).getBounds()
bmin = max(bmin,ParValue-10*ParError)
bmax = min(bmax,ParValue+10*ParError)
param,loglike = MakeScan(Fit,spectrum,par,bmin,bmax,config['fitting']['optimizer'])
#restore best fit parameters
spectrum.getParam(par).setFree(1)
ParValue = spectrum.getParam(par).setValue(ParValue)
plt.figure()
plt.plot(param,loglike,"-r")
plt.title(par)
plt.xlabel("Parameter: "+par)
plt.ylabel("Log(Like)")
utils.mkdir_p(config["out"]+"/"+cst.ScanPath)
savefile = open(config["out"]+"/"+cst.ScanPath+ "/Scan_"+par+".txt","w")
for i in xrange(param.size):
savefile.write(str(param[i])+" "+str(loglike[i])+"\n")
savefile.close()
plt.savefig(config["out"]+"/"+cst.ScanPath+ "/Scan_"+par+".png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
def Contour(config):
# ROOT.gROOT.SetBatch(ROOT.kTRUE)
# cres = ROOT.TCanvas("Contour")
config["Spectrum"]["FitsGeneration"] = "no"
parname1 = config["Contours"]["parname1"]
parname2 = config["Contours"]["parname2"]
FitRunner,Fit = RunGTlike.GenAnalysisObjects(config)
spectrum = Fit[FitRunner.obs.srcname].funcs['Spectrum']
ParName = spectrum.paramNames
mes = Loggin.Message()
mes.info("Computing Contours for "+parname1+" and "+parname2)
### Check part !!!!
findpar2 = findpar1 = False
for par in ParName : #Loop over the parameters to check
if par == parname1:
findpar1 = True
if not(spectrum.getParam(par).isFree()):
mes.error(parname1+" is not a free parameter")
if par == parname2:
findpar2 = True
if not(spectrum.getParam(par).isFree()):
mes.error(parname2+" is not a free parameter")
if not(findpar1):
mes.error(parname1+" is not a valid parameter")
if not(findpar2):
mes.error(parname2+" is not a valid parameter")
bestloglike = Fit.fit(0,covar=False,optimizer=config['fitting']['optimizer'])
print spectrum
print "Min LogLikelihood =",bestloglike
## get values
ParValue1 = spectrum.getParam(parname1).value()
ParError1 = spectrum.getParam(parname1).error()
bmin1,bmax1 = spectrum.getParam(parname1).getBounds()
bmin1 = max(bmin1,ParValue1-20*ParError1)
bmax1 = min(bmax1,ParValue1+20*ParError1)
ParValue2 = spectrum.getParam(parname2).value()
ParError2 = spectrum.getParam(parname2).error()
bmin2,bmax2 = spectrum.getParam(parname2).getBounds()
bmin2 = max(bmin2,ParValue2-20*ParError2)
bmax2 = min(bmax2,ParValue2+20*ParError2)
N = 100
param2 = numpy.zeros(N)
loglike = ROOT.TH2F("loglike","Contours (68%, 95%, 99%)",N,bmin1,bmax1,N,bmin2,bmax2)
spectrum.getParam(parname2).setFree(0)
mes.info("Boundaries for "+parname1+" ["+str(bmin1)+","+str(bmax1)+"]")
mes.info("Boundaries for "+parname2+" ["+str(bmin2)+","+str(bmax2)+"]")
for i in xrange(N):
param2[i] = bmin2 + (bmax2-bmin2)*i/(N-1.)
spectrum.getParam(parname2).setValue(param2[i])
param1,ll = MakeScan(Fit,spectrum,parname1,bmin1,bmax1,config['fitting']['optimizer'],N)
for j in xrange(N):
loglike.Fill(param1[j],param2[i],ll[j])
utils.mkdir_p(config["out"]+"/"+cst.ScanPath)
cres = ROOT.TCanvas("Contours")
loglike.SetMinimum(bestloglike);
loglike.SetMaximum(bestloglike+3);
loglike.SetXTitle(parname1);
loglike.SetYTitle(parname2);
loglike.SetStats(000)
loglike.SetContour(3)
loglike.SetContourLevel(0,bestloglike+0.5)
loglike.SetContourLevel(1,bestloglike+4./2.)
loglike.SetContourLevel(2,bestloglike+6.63/2.)
loglike.Draw("CONT1");
tgrres = ROOT.TGraphErrors(2,array.array('f',[ParValue1,ParValue1]),array.array('f',[ParValue2,ParValue2]),array.array('f',[ParError1,ParError1]),array.array('f',[ParError2,ParError2]))
tgrres.Draw(".pz")
cres.Print(config["out"]+"/"+cst.ScanPath+ "/Contours_"+parname1+"_"+parname2+".eps")
cres.Print(config["out"]+"/"+cst.ScanPath+ "/Contours_"+parname1+"_"+parname2+".C")
cres.Print(config["out"]+"/"+cst.ScanPath+ "/Contours_"+parname1+"_"+parname2+".png")
mes.success("Scan Performed")
| bsd-3-clause |
wllmtrng/ggplot | ggplot/geoms/geom.py | 12 | 13443 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import pandas as pd
import numpy as np
from matplotlib.cbook import iterable
from ggplot.utils import is_string
import ggplot.stats
from ggplot.utils import is_scalar_or_string
from ggplot.components import aes
from ggplot.utils.exceptions import GgplotError
__all__ = ['geom']
__all__ = [str(u) for u in __all__]
class geom(object):
"""Base class of all Geoms"""
DEFAULT_AES = dict()
REQUIRED_AES = set()
DEFAULT_PARAMS = dict()
data = None
aes = None
manual_aes = None
params = None
# Some geoms require more information than that provided by the
# user. This information is usually another aesthetic variable
    # but it could be another non-aesthetic variable. It is the duty
# of the associated statistic to calculate this information.
#
# For example:
# A geom may have REQUIRED_AES = {'x', 'y'} and
# the user may map or manually set only aesthetic 'x',
# so the stat would have to calculate 'y'. However this
# may not be enough, to actually make the plot the geom
# may require the 'width' aesthetic. In this case, 'width'
# would be the extra required information.
#
# geoms should fill out this set with what they require
# and is not in REQUIRED_AES
# see: geom_bar, stat_bin
_extra_requires = set()
    # Some ggplot aesthetics are named differently from the parameters of
# the matplotlib function that will be used to plot.
# This dictionary, of the form {ggplot-aes-name: matplotlib-aes-name},
# connects the two.
#
# geoms should fill it out so that the plot
# information they receive is properly named.
# See: geom_point
_aes_renames = dict()
    # A matplotlib plot function may require that an aesthetic have a
# single unique value. e.g. linestyle='dashed' and not
# linestyle=['dashed', 'dotted', ...].
# A single call to such a function can only plot lines with the
# same linestyle. However, if the plot we want has more than one
# line with different linestyles, we need to group the lines with
# the same linestyle and plot them as one unit.
#
# geoms should fill out this set with such aesthetics so that the
# plot information they receive can be plotted in a single call.
# Use names as expected by matplotlib
# See: geom_point
_units = set()
def __init__(self, *args, **kwargs):
self.valid_aes = set(self.DEFAULT_AES) ^ self.REQUIRED_AES
self._stat_type = self._get_stat_type(kwargs)
self.aes, self.data, kwargs = self._find_aes_and_data(args, kwargs)
# This set will list the geoms that were uniquely set in this
# geom (not specified already i.e. in the ggplot aes).
self.aes_unique_to_geom = set(self.aes.keys())
if 'colour' in kwargs:
kwargs['color'] = kwargs.pop('colour')
# When a geom is created, some of the parameters may be meant
# for the stat and some for the layer.
        # Some arguments can be identified as either aesthetics to
        # the geom or as parameter settings to the stat; in this case,
        # if the argument has a scalar value it is a setting for the stat.
self._stat_params = {}
self.params = deepcopy(self.DEFAULT_PARAMS)
self.manual_aes = {}
for k, v in kwargs.items():
if k in self.aes:
raise GgplotError('Aesthetic, %s, specified twice' % k)
elif (k in self.valid_aes and
k in self._stat_type.DEFAULT_PARAMS and
is_scalar_or_string(kwargs[k])):
self._stat_params[k] = v
elif k in self.valid_aes:
self.manual_aes[k] = v
elif k in self.DEFAULT_PARAMS:
self.params[k] = v
elif k in self._stat_type.DEFAULT_PARAMS:
self._stat_params[k] = v
else:
raise GgplotError('Cannot recognize argument: %s' % k)
self._cache = {}
# When putting together the plot information for the geoms,
        # we need the aesthetic names to be matplotlib compatible.
# These are created and stored in self._cache and so would
# go stale if users or geoms change geom.manual_aes
self._create_aes_with_mpl_names()
def plot_layer(self, data, ax):
# Any aesthetic to be overridden by the manual aesthetics
# should not affect the statistics and the unit grouping
# of the data
_cols = set(data.columns) & set(self.manual_aes)
data = data.drop(_cols, axis=1)
data = self._calculate_stats(data)
self._verify_aesthetics(data)
_needed = self.valid_aes | self._extra_requires
data = data[list(set(data.columns) & _needed)]
# aesthetic precedence
# geom.manual_aes > geom.aes > ggplot.aes (part of data)
# NOTE: currently geom.aes is not handled. This may be
# a bad place to do it -- may mess up faceting or just
# inefficient. Probably in ggplot or layer.
data = data.rename(columns=self._aes_renames)
units = self._units & set(data.columns)
# Create plot information that observes the aesthetic precedence
# - (grouped data + manual aesthics)
# - modify previous using statistic
# - previous overwrites the default aesthetics
for _data in self._get_unit_grouped_data(data, units):
_data.update(self._cache['manual_aes_mpl']) # should happen before the grouping
pinfo = deepcopy(self._cache['default_aes_mpl'])
pinfo.update(_data)
self._plot_unit(pinfo, ax)
def _plot_unit(self, pinfo, ax):
msg = "{} should implement this method."
raise NotImplementedError(
msg.format(self.__class__.__name__))
def _get_stat_type(self, kwargs):
"""
Find out the stat and return the type object that can be
used(called) to create it.
For example, if the stat is 'smooth' we return
ggplot.stats.stat_smooth
"""
# get
try:
_name = 'stat_%s' % kwargs['stat']
except KeyError:
_name = 'stat_%s' % self.DEFAULT_PARAMS['stat']
return getattr(ggplot.stats, _name)
def __radd__(self, gg):
gg = deepcopy(gg)
# steal aesthetics info.
self._cache['ggplot.aesthetics'] = deepcopy(gg.aesthetics)
self.aes_unique_to_geom -= set(gg.aesthetics.keys())
# create stat and hand over the parameters it understands
if not hasattr(self, '_stat'):
self._stat = self._stat_type()
self._stat.params.update(self._stat_params)
gg.geoms.append(self)
self.gg = gg
return gg
def _verify_aesthetics(self, data):
"""
Check if all the required aesthetics have been specified.
Raise an Exception if an aesthetic is missing
"""
missing_aes = (self.REQUIRED_AES -
set(self.manual_aes) -
set(data.columns))
if missing_aes:
msg = '{} requires the following missing aesthetics: {}'
raise GgplotError(msg.format(
self.__class__.__name__, ', '.join(missing_aes)))
def _find_aes_and_data(self, args, kwargs):
"""
Identify the aes and data objects.
Return a dictionary of the aes mappings and
the data object.
- args is a list
- kwargs is a dictionary
Note: This is a helper function for self.__init__
It modifies the kwargs
"""
passed_aes = {}
data = None
aes_err = 'Found more than one aes argument. Expecting zero or one'
for arg in args:
if isinstance(arg, aes) and passed_aes:
raise Exception(aes_err)
if isinstance(arg, aes):
passed_aes = arg
elif isinstance(arg, pd.DataFrame):
data = arg
else:
raise GgplotError(
'Unknown argument of type "{0}".'.format(type(arg)))
if 'mapping' in kwargs and passed_aes:
raise GgplotError(aes_err)
elif not passed_aes and 'mapping' in kwargs:
passed_aes = kwargs.pop('mapping')
if data is None and 'data' in kwargs:
data = kwargs.pop('data')
_aes = {}
# To make mapping of columns to geom/stat or stat parameters
# possible
_keep = set(self.DEFAULT_PARAMS) | set(self._stat_type.DEFAULT_PARAMS)
for k, v in passed_aes.items():
if k in self.valid_aes or k in _keep:
_aes[k] = v
else:
raise GgplotError('Cannot recognize aesthetic: %s' % k)
return _aes, data, kwargs
def _calculate_and_rename_stats(self, data):
"""
Use the stat object (self._stat) to compute the stats
and make sure the returned columns are renamed to
matplotlib compatible names
"""
# only rename the new columns,
# so keep track of the original ones
_original = set(data)
data = self._stat._calculate(data)
_d = {}
for old, new in self._aes_renames.items():
if (old in data) and (old not in _original):
_d[new] = data.pop(old)
data.update(_d)
return data
def _calculate_stats(self, data):
"""
Calculate the statistics on each group in the data
The groups are determined by the mappings.
Returns
-------
data : dataframe
"""
self._stat._verify_aesthetics(data)
self._stat._calculate_global(data)
# In most cases 'x' and 'y' mappings do not and
# should not influence the grouping. If this is
# not the desired behaviour then the groups
# parameter should be used.
groups = set(self._cache['ggplot.aesthetics'].keys())
groups = groups & (self.valid_aes - {'x', 'y'})
groups = groups & set(data.columns)
new_data = pd.DataFrame()
# TODO: Find a more effecient way to concatenate
# the dataframes
if groups:
for name, _data in data.groupby(sorted(groups)):
_data = _data.reindex()
_data = self._stat._calculate(_data)
new_data = new_data.append(_data, ignore_index=True)
else:
new_data = self._stat._calculate(data)
return new_data
def _create_aes_with_mpl_names(self):
"""
Create copies of the manual and default aesthetics
        with matplotlib compatible names.
Uses self._aes_renames, and the results are stored
in:
self._cache['manual_aes_mpl']
self._cache['default_aes_mpl']
"""
def _rename_fn(aes_dict):
# to prevent overwrites
_d = {}
for old, new in self._aes_renames.items():
if old in aes_dict:
_d[new] = aes_dict.pop(old)
aes_dict.update(_d)
self._cache['manual_aes_mpl'] = deepcopy(self.manual_aes)
self._cache['default_aes_mpl'] = deepcopy(self.DEFAULT_AES)
_rename_fn(self._cache['manual_aes_mpl'])
_rename_fn(self._cache['default_aes_mpl'])
def _get_unit_grouped_data(self, data, units):
"""
Split data into groups.
The units determine the groups.
Parameters
----------
data : dataframe
The data to be split into groups
units : set
A set of column names in the data and by
which the grouping will happen
Returns
-------
out : list of dict
Each dict represents a unique grouping.
The dicts are of the form
{'column-name': list-of-values | value}
Note
----
This is a helper function for self._plot_layer
"""
out = []
if units:
for name, _data in data.groupby(list(units)):
_data = _data.to_dict('list')
for ae in units:
_data[ae] = _data[ae][0]
out.append(_data)
else:
_data = data.to_dict('list')
out.append(_data)
return out
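    # Illustrative example (assumed data) of the grouping performed above:
    #   data = pd.DataFrame({'x': [1, 2, 3, 4],
    #                        'y': [5, 6, 7, 8],
    #                        'linestyle': ['--', '--', ':', ':']})
    #   units = {'linestyle'}
    #   result:
    #   [{'x': [1, 2], 'y': [5, 6], 'linestyle': '--'},
    #    {'x': [3, 4], 'y': [7, 8], 'linestyle': ':'}]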
def sort_by_x(self, pinfo):
"""
Sort the lists in pinfo according to pinfo['x']
This function is useful for geom's that expect
the x-values to come in sorted order
"""
# Remove list types from pinfo
_d = {}
for k in list(pinfo.keys()):
if not is_string(pinfo[k]) and iterable(pinfo[k]):
_d[k] = pinfo.pop(k)
# Sort numerically if all items can be cast
try:
x = list(map(np.float, _d['x']))
except (ValueError, TypeError):
x = _d['x']
# Make sure we don't try to sort something unsortable
try:
idx = np.argsort(x)
# Put sorted lists back in pinfo
for key in _d:
pinfo[key] = [_d[key][i] for i in idx]
except:
pass
return pinfo
| bsd-2-clause |
pv/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
GGoussar/scikit-image | setup.py | 12 | 5030 | #! /usr/bin/env python
descr = """Image Processing SciKit
Image processing algorithms for SciPy, including IO, morphology, filtering,
warping, color manipulation, object detection, etc.
Please refer to the online documentation at
http://scikit-image.org/
"""
DISTNAME = 'scikit-image'
DESCRIPTION = 'Image processing routines for SciPy'
LONG_DESCRIPTION = descr
MAINTAINER = 'Stefan van der Walt'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-image.org'
LICENSE = 'Modified BSD'
DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-image to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKIMAGE_SETUP__ = True
with open('skimage/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('skimage')
config.add_data_dir('skimage/data')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install scikit-image when Numpy is not yet
# present in the system.
from setuptools import setup
extra = {}
else:
print('To install scikit-image from source, you will need numpy.\n' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager. For more\n' +
'details, see http://scikit-image.org/docs/stable/install.html')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
install_requires=INSTALL_REQUIRES,
# install cython when running setup.py (source install)
setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
entry_points={
'console_scripts': ['skivi = skimage.scripts.skivi:main'],
},
cmdclass={'build_py': build_py},
**extra
)
| bsd-3-clause |
Determined22/Assignments-PatternRecognition-2016Fall | spectral.py | 1 | 3659 | ## Ng spectral clustering
import numpy as np
import matplotlib.pyplot as plt
from kmeans import KMeans
with open('X_spectral.txt', encoding='utf-8') as fr:
X = [(float(line.strip().split()[0]), float(line.strip().split()[1])) for line in fr.readlines()]
X = np.array(X)
def visual(X, k=None, sigma=None, save=0):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X[:100, :1], X[:100, 1:], c='blue')
ax.scatter(X[100:, :1], X[100:, 1:], c='red')
if save != 0:
plt.title('data after transforming\n($\sigma=' + str(sigma) + '$, $k=' + str(k) + '$)')
plt.savefig('k' + str(k) + '_' + 'sigma' + str(sigma) + '.pdf', dpi=400)
plt.show()
def get_L(X, k, sigma):
"""
    Build the symmetric normalized Laplacian matrix L_sym
    :param X: data points
    :param k: number of nearest neighbors used to build the graph
    :param sigma: bandwidth of the Gaussian affinity
:return: L_sym
"""
(n, d) = X.shape
    D = np.zeros((n, n)) # degree matrix
    W = np.zeros((n, n)) # pairwise affinity matrix
for i in range(n):
        Xi_neibors = [np.linalg.norm(X[i] - X[j]) for j in range(n)] # build edges from the k nearest neighbors
neibors_index = np.argsort(Xi_neibors)[1:(k+1)]
for index in neibors_index:
W[i][index] = np.exp(-(np.linalg.norm(X[i] - X[index]))**2 / (2 * sigma**2))
    W = (W + np.transpose(W)) / 2 # make W symmetric, correcting the directed kNN graph
for i in range(n):
D[i][i] = sum(W[i])
L = D - W
D_ = np.zeros((n, n))
for i in range(n):
D_[i][i] = pow(D[i][i], -0.5)
L_sym = np.dot(np.dot(D_, L), D_)
return L_sym, L
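# Illustrative check (not part of the assignment code): for any sensible
# (k, sigma) the matrix L_sym returned above should be symmetric and
# positive semi-definite; the defaults below are assumptions for a quick test.
def _check_L_sym(X, k=10, sigma=0.5):
    L_sym, L = get_L(X, k, sigma)
    assert np.allclose(L_sym, L_sym.T)
    # smallest eigenvalue should be (numerically) non-negative
    return np.min(np.linalg.eigvalsh(L_sym))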
def spectral(X, sigma, k, centroids):
"""
    Ng spectral clustering algorithm
    :param X: data points
    :param sigma: bandwidth of the Gaussian affinity
    :param k: number of nearest neighbors
    :return: accu, the clustering accuracy
"""
(n, d) = X.shape
L_sym, L = get_L(X, k, sigma)
    eig, eigvec = np.linalg.eig(L_sym) # eigenvectors are stored as columns
# eig_index = np.argsort(eig)[1:d+1]
    eig_index = np.argsort(eig)[:d] # indices of the d smallest eigenvalues
U = eigvec[:, eig_index]
T = np.zeros(U.shape)
for i in range(n):
for j in range(d):
T[i][j] = U[i][j] / np.linalg.norm(U[i])
Y = T
# visual(Y, k=k, sigma=sigma, save=1)
cluster = KMeans(2, 100, centroids)
cluster.fit(Y)
labels = cluster.labels
if labels[0] == 0:
n1 = 100 - sum(labels[:100]); n2 = sum(labels[100:])
else:
n1 = sum(labels[:100]); n2 = 100 - sum(labels[100:])
accu = (n1 + n2) / n
print('---------------------sigma=%.2f, k=%d, accu=%.4f' % (sigma, k, accu))
return accu
def visualExperiment(para_list, accu_list, C, paraISk=1):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(para_list, accu_list, marker='s', c='blue', lw=1.5)
plt.ylim(0.6, 1.02)
plt.ylabel('$accu$')
if paraISk == 1:
plt.xlabel('$k$')
plt.title('$accu$ with different $k$\n($\sigma=0.5$)')
# plt.savefig('spectral_k.pdf', dpi=400)
else:
plt.xlabel('$\sigma$')
plt.title('$accu$ with different $\sigma$\n($k=10$)')
# plt.savefig('spectral_sigma.pdf', dpi=400)
plt.show()
if __name__ == '__main__':
centroids = None
# visual(X)
sigma = 0.5
k_list = range(10, 60, 5)
accu_list_k = []
for k in k_list:
accu_list_k.append(spectral(X, sigma, k, centroids))
visualExperiment(k_list, accu_list_k, sigma, paraISk=1)
k = 20
sigma_list = range(2, 22, 2); sigma_list = [_/10 for _ in sigma_list] # 0.2, 0.4, 0.6, ..., 2.0
accu_list_sigma = []
for sigma in sigma_list:
accu_list_sigma.append(spectral(X, sigma, k, centroids))
visualExperiment(sigma_list, accu_list_sigma, k, paraISk=0)
| mit |
igormarfin/trading-with-python | sandbox/spreadCalculations.py | 78 | 1496 | '''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * IWM - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = plt.figure(2)
s.plot()
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/tests/test_dummy.py | 2 | 14060 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
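# weighted priors: class 0 collects 0.1 + 0.1 = 0.2, class 1 collects 1.0,
# both normalized by the total sample weight of 1.2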
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
idf/FaceReader | facerec_py/facerec/visual.py | 1 | 3020 | from facerec_py.facerec.normalization import minmax
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# try to import the PIL Image module
try:
from PIL import Image
except ImportError:
import Image
import math
def create_font(fontname='Tahoma', fontsize=10):
return { 'fontname': fontname, 'fontsize':fontsize }
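# Plot a single image in grayscale: optionally reshape the flat vector X to
# size sz, rescale it to [0, 255] and either show it or save it to filename.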
def plot_gray(X, sz=None, filename=None):
if sz is not None:
X = X.reshape(sz)
X = minmax(X, 0, 255)
fig = plt.figure()
implot = plt.imshow(np.asarray(X), cmap=cm.gray)
if filename is None:
plt.show()
else:
fig.savefig(filename, format="png", transparent=False)
def plot_eigenvectors(eigenvectors, num_components, sz, filename=None, start_component=0, rows = None, cols = None, title="Subplot", color=True):
if (rows is None) or (cols is None):
rows = cols = int(math.ceil(np.sqrt(num_components)))
num_components = min(num_components, eigenvectors.shape[1])
fig = plt.figure()
for i in range(start_component, num_components):
vi = eigenvectors[0:,i].copy()
vi = minmax(np.asarray(vi), 0, 255, dtype=np.uint8)
vi = vi.reshape(sz)
ax0 = fig.add_subplot(rows,cols,(i-start_component)+1)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax0.get_yticklabels(), visible=False)
plt.title("%s #%d" % (title, i), create_font('Tahoma',10))
if color:
implot = plt.imshow(np.asarray(vi))
else:
implot = plt.imshow(np.asarray(vi), cmap=cm.gray)
if filename is None:
fig.show()
else:
fig.savefig(filename, format="png", transparent=False)
def subplot(title, images, rows, cols, sptitle="subplot", sptitles=[], colormap=cm.gray, ticks_visible=True, filename=None):
fig = plt.figure()
# main title
fig.text(.5, .95, title, horizontalalignment='center')
for i in xrange(len(images)):
ax0 = fig.add_subplot(rows,cols,(i+1))
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax0.get_yticklabels(), visible=False)
if len(sptitles) == len(images):
plt.title("%s #%s" % (sptitle, str(sptitles[i])), create_font('Tahoma',10))
else:
plt.title("%s #%d" % (sptitle, (i+1)), create_font('Tahoma',10))
plt.imshow(np.asarray(images[i]), cmap=colormap)
if filename is None:
plt.show()
else:
fig.savefig(filename)
# using plt plot:
#filename="/home/philipp/facerec/at_database_vs_accuracy_xy.png"
#t = np.arange(2., 10., 1.)
#fig = plt.figure()
#plt.plot(t, r0, 'k--', t, r1, 'k')
#plt.legend(("Eigenfaces", "Fisherfaces"), 'lower right', shadow=True, fancybox=True)
#plt.ylim(0,1)
#plt.ylabel('Recognition Rate')
#plt.xlabel('Database Size (Images per Person)')
#fig.savefig(filename, format="png", transparent=False)
#plt.show()
| mit |
shyamalschandra/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
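# predict() returns +1 for inliers and -1 for outliers; the counts below are
# the points misclassified in each set (train/test samples flagged as
# outliers, and generated outliers accepted as inliers)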
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
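# decision_function gives the signed distance to the learned frontier
# (positive inside, negative outside), evaluated on the grid for plotting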
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |