repo_name | path | copies | size | content | license
---|---|---|---|---|---|
stringlengths 6-96 | stringlengths 4-191 | stringclasses 322 values | stringlengths 4-6 | stringlengths 762-753k | stringclasses 15 values
zhmz90/hep_ml | hep_ml/metrics_utils.py | 3 | 11095 | from __future__ import division, print_function, absolute_import
import numpy
from sklearn.utils.validation import column_or_1d
from .commonutils import check_sample_weight, sigmoid_function
__author__ = 'Alex Rogozhnikov'
def check_metrics_arguments(y_true, y_pred, sample_weight, two_class=True, binary_pred=True):
"""
Checks the arguments passed to metrics
:param y_true: labels of classes
:param y_pred: predictions
:param sample_weight: weights of samples
:param two_class: if True, will check that y_true contains only zeros and ones
:param binary_pred: if True, will check that y_pred contains only zeros and ones
:return: the same arguments as tuple
"""
sample_weight = check_sample_weight(y_true, sample_weight=sample_weight)
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
assert len(y_true) == len(y_pred), \
'The lengths of y_true and y_pred are different: %i and %i' % (len(y_true), len(y_pred))
if two_class:
assert numpy.in1d(y_true, [0, 1]).all(), 'The y_true array should contain only two labels: 0 and 1, ' \
'it contains:' + str(numpy.unique(y_true))
if binary_pred:
assert numpy.in1d(y_pred, [0, 1]).all(), 'The y_pred array should contain only two labels: 0 and 1, ' \
'it contains:' + str(numpy.unique(y_pred))
return y_true, y_pred, sample_weight
def prepare_distribution(data, weights):
"""Prepares the distribution to be used later in KS and CvM,
merges equal data, computes (summed) weights and cumulative distribution.
All output arrays are of same length and correspond to each other."""
weights = weights / numpy.sum(weights)
prepared_data, indices = numpy.unique(data, return_inverse=True)
prepared_weights = numpy.bincount(indices, weights=weights)
prepared_cdf = compute_cdf(prepared_weights)
return prepared_data, prepared_weights, prepared_cdf
# region Helpful functions to work with bins and groups
"""
There are two basic approaches to handle are bins and knn.
Here they are represented as bins and (!) groups.
The difference between bins and groups: each event belongs to one and only one bin,
in the case of groups each event may belong to several groups.
Knn is one particular case of groups, bins can be reduced to groups either
Bin_indices is an array, where for each event it's bin is written:
bin_indices = [0, 0, 1, 2, 2, 4]
Group_indices is list, each item is indices of events in some group
group_indices = [[0,1], [2], [3,4], [5]]
Group matrix is another way to write group_indices,
this is sparse matrix of shape [n_groups, n_samples],
group_matrix[group_id, sample_id] = 1, if event belong to cell, 0 otherwise
While bin indices are computed for all the events together, group indices
are typically computed only for events of some particular class.
"""
def compute_bin_indices(X_part, bin_limits=None, n_bins=20):
"""For arbitrary number of variables computes the indices of data,
the indices are unique numbers of bin from zero to \prod_j (len(bin_limits[j])+1)
If bin_limits is not provided, they are computed using data.
:param X_part: columns along which binning is done
:type X_part: numpy.ndarray
"""
if bin_limits is None:
bin_limits = []
for variable_index in range(X_part.shape[1]):
variable_data = X_part[:, variable_index]
bin_limits.append(numpy.linspace(numpy.min(variable_data), numpy.max(variable_data), n_bins + 1)[1: -1])
bin_indices = numpy.zeros(len(X_part), dtype=int)  # numpy.int is deprecated; the builtin int behaves the same here
for axis, bin_limits_axis in enumerate(bin_limits):
bin_indices *= (len(bin_limits_axis) + 1)
bin_indices += numpy.searchsorted(bin_limits_axis, X_part[:, axis])
return bin_indices
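# Added usage sketch (toy data, not part of the original module): bin each
# event by two features into a 3x3 grid; the resulting indices lie in [0, 9).
#   X_demo = numpy.array([[0.1, 0.2], [0.9, 0.8], [0.5, 0.5]])
#   compute_bin_indices(X_demo, n_bins=3)
#   # -> array([0, 8, 4])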
def bin_to_group_indices(bin_indices, mask):
""" Transforms bin_indices into group indices, skips empty bins
:type bin_indices: numpy.array, each element in index of bin this event belongs, shape = [n_samples]
:type mask: numpy.array, boolean mask of indices to split into bins, shape = [n_samples]
:rtype: list(numpy.array), each element is indices of elements in some bin
"""
assert len(bin_indices) == len(mask), "Different length"
bins_id = numpy.unique(bin_indices)
result = list()
for bin_id in bins_id:
result.append(numpy.where(mask & (bin_indices == bin_id))[0])
return result
def group_indices_to_groups_matrix(group_indices, n_events):
"""
:param group_indices:
:return: sparse matrix of shape [n_groups, n_samples],
one if particular event belongs to particular category.
"""
from scipy import sparse
groups_matrix = sparse.lil_matrix((len(group_indices), n_events))
for group_id, events_in_group in enumerate(group_indices):
groups_matrix[group_id, events_in_group] = 1
return sparse.csr_matrix(groups_matrix)
# endregion
# region Supplementary uniformity-related functions (to measure flatness of predictions)
def compute_cdf(ordered_weights):
"""Computes cumulative distribution function (CDF) by ordered weights,
be sure that sum(ordered_weights) == 1
"""
return numpy.cumsum(ordered_weights) - 0.5 * ordered_weights
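# Added example (not part of the original module) of the "half-step" CDF
# convention used here: each point carries half of its own weight.
#   compute_cdf(numpy.array([0.25, 0.25, 0.5]))
#   # -> array([0.125, 0.375, 0.75])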
def compute_bin_weights(bin_indices, sample_weight):
assert len(bin_indices) == len(sample_weight), 'Different lengths of array'
result = numpy.bincount(bin_indices, weights=sample_weight)
return result / numpy.sum(result)
def compute_divided_weight(group_matrix, sample_weight):
"""Divided weight takes into account that different events
are met different number of times """
occurences = numpy.array(group_matrix.sum(axis=0)).flatten()
return sample_weight / numpy.maximum(occurences, 1)
def compute_group_weights(group_matrix, sample_weight):
"""
Group weight = sum of divided weights of indices inside that group.
"""
divided_weight = compute_divided_weight(group_matrix=group_matrix, sample_weight=sample_weight)
result = group_matrix.dot(divided_weight)
return result / numpy.sum(result)
def compute_bin_efficiencies(y_score, bin_indices, cut, sample_weight, minlength=None):
"""Efficiency of bin = total weight of (signal) events that passed the cut
in the bin / total weight of signal events in the bin.
Returns small negative number for empty bins"""
y_score = column_or_1d(y_score)
assert len(y_score) == len(sample_weight) == len(bin_indices), "different size"
if minlength is None:
minlength = numpy.max(bin_indices) + 1
bin_total = numpy.bincount(bin_indices, weights=sample_weight, minlength=minlength)
passed_cut = y_score > cut
bin_passed_cut = numpy.bincount(bin_indices[passed_cut],
weights=sample_weight[passed_cut], minlength=minlength)
return bin_passed_cut / numpy.maximum(bin_total, 1)
def compute_group_efficiencies_by_indices(y_score, groups_indices, cut, divided_weight=None, smoothing=0.0):
""" Provided cut, computes efficiencies inside each bin.
:param divided_weight: weight for each event, divided by the number of it's occurences """
y_score = column_or_1d(y_score)
divided_weight = check_sample_weight(y_score, sample_weight=divided_weight)
# with smoothing=0, this is 0 or 1, latter for passed events.
passed_cut = sigmoid_function(y_score - cut, width=smoothing)
if isinstance(groups_indices, numpy.ndarray) and numpy.ndim(groups_indices) == 2:
# this speedup is specially for knn
result = numpy.average(numpy.take(passed_cut, groups_indices),
weights=numpy.take(divided_weight, groups_indices),
axis=1)
else:
result = numpy.zeros(len(groups_indices))
for i, group in enumerate(groups_indices):
result[i] = numpy.average(passed_cut[group], weights=divided_weight[group])
return result
def compute_group_efficiencies(y_score, groups_matrix, cut, divided_weight=None, smoothing=0.0):
""" Provided cut, computes efficiencies inside each bin.
:param divided_weight: weight for each event, divided by the number of it's occurences """
y_score = column_or_1d(y_score)
divided_weight = check_sample_weight(y_score, sample_weight=divided_weight)
# with smoothing=0, this is 0 or 1, latter for passed events.
passed_cut = sigmoid_function(y_score - cut, width=smoothing)
passed_weight = groups_matrix.dot(divided_weight * passed_cut)
total_weight = groups_matrix.dot(divided_weight)
return passed_weight / numpy.maximum(total_weight, 1e-10)
def weighted_deviation(a, weights, power=2.):
""" sum weight * |x - x_mean|^power, measures deviation from mean """
mean = numpy.average(a, weights=weights)
return numpy.average(numpy.abs(mean - a) ** power, weights=weights)
# endregion
# region Special methods for uniformity metrics
def theil(x, weights):
"""Theil index of array with regularization"""
assert numpy.all(x >= 0), "negative numbers can't be used in Theil"
x_mean = numpy.average(x, weights=weights)
normed = x / x_mean
# to avoid problems with log of negative number.
normed[normed < 1e-20] = 1e-20
return numpy.average(normed * numpy.log(normed), weights=weights)
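# Added check-style example (not part of the original module): for a perfectly
# uniform array the Theil index vanishes,
#   theil(numpy.ones(10), weights=numpy.ones(10))  # -> 0.0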
def _ks_2samp_fast(prepared_data1, data2, prepared_weights1, weights2, cdf1):
"""Pay attention - prepared data should not only be sorted,
but equal items should be merged (by summing weights),
data2 should not have elements larger than max(prepared_data1) """
indices = numpy.searchsorted(prepared_data1, data2)
weights2 /= numpy.sum(weights2)
prepared_weights2 = numpy.bincount(indices, weights=weights2, minlength=len(prepared_data1))
cdf2 = compute_cdf(prepared_weights2)
return numpy.max(numpy.abs(cdf1 - cdf2))
def ks_2samp_weighted(data1, data2, weights1, weights2):
""" almost the same as ks2samp from scipy.stats, but this also supports weights """
x = numpy.unique(numpy.concatenate([data1, data2]))
weights1 /= numpy.sum(weights1) * 1.
weights2 /= numpy.sum(weights2) * 1.
inds1 = numpy.searchsorted(x, data1)
inds2 = numpy.searchsorted(x, data2)
w1 = numpy.bincount(inds1, weights=weights1, minlength=len(x))
w2 = numpy.bincount(inds2, weights=weights2, minlength=len(x))
F1 = compute_cdf(w1)
F2 = compute_cdf(w2)
return numpy.max(numpy.abs(F1 - F2))
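# Added usage sketch (toy arrays, not part of the original module):
#   data1 = numpy.array([1., 2., 3.])
#   data2 = numpy.array([1., 2., 4.])
#   ks_2samp_weighted(data1, data2, weights1=numpy.ones(3), weights2=numpy.ones(3))
#   # -> 0.1666..., the largest gap between the two weighted "half-step" CDFs
# Because of the half-step convention in compute_cdf, this differs slightly
# from scipy's ks_2samp statistic (which is 1/3 for this input).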
def _cvm_2samp_fast(prepared_data1, data2, prepared_weights1, weights2, cdf1, power=2.):
"""Pay attention - prepared data should not only be sorted,
but equal items should be merged (by summing weights) """
indices = numpy.searchsorted(prepared_data1, data2)
weights2 /= numpy.sum(weights2)
prepared_weights2 = numpy.bincount(indices, weights=weights2, minlength=len(prepared_data1))
cdf2 = compute_cdf(prepared_weights2)
return numpy.average(numpy.abs(cdf1 - cdf2) ** power, weights=prepared_weights1)
# endregion
| apache-2.0 |
Nyker510/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
elijah513/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
catalyst-cooperative/pudl | src/pudl/constants.py | 1 | 98991 | """
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a variety of constant values which
are used throughout PUDL to populate static lists within the data packages or
for data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the
# plant location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
    # Utilities must come after plants. Plant location needs to be removed
    # before the utility locations are compiled.
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing aggregated EPA IPM regions (keys) and lists of
the constituent EPA IPM region names (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the names of the glue tables (linking plants and
utilities across data sources) that can be pulled into PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2020)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of partitions (years, states, etc.) we currently expect to be
# able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2020)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) that map
each partition type (sub-key) to the partitions (sub-value), such as tuples
of years, that can be ingested into PUDL for that data source.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary mapping table names (keys) to tuples of column names
(values) for integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "[email protected]",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "Zane Selvans",
"email": "[email protected]",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "Christina Gosnell",
"email": "[email protected]",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "Steven Winter",
"email": "[email protected]",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "Alana Wilson",
"email": "[email protected]",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "Karl Dunkle Werner",
"email": "[email protected]",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "Greg Schivley",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "[email protected]",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # Where is this used? Removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'storage_capacity_mw': float,
'storage_customers': pd.Int64Dtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
'summer_capacity_estimate': pd.BooleanDtype(),
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'summer_peak_demand_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': pd.BooleanDtype(),
'syncronized_transmission_grid': pd.BooleanDtype(),
# Added by AES for NM & DG tech table (might want to consider merging with another fuel label)
'tech_class': pd.CategoricalDtype(categories=TECH_CLASSES),
'technology_description': pd.StringDtype(),
'time_cold_shutdown_full_load_code': pd.StringDtype(),
'time_of_use_pricing_program': pd.BooleanDtype(),
'time_responsive_programs': pd.BooleanDtype(),
'time_responsiveness_customers': pd.Int64Dtype(),
'timezone': pd.StringDtype(),
'topping_bottoming_code': pd.StringDtype(),
'total': float,
'total_capacity_less_1_mw': float,
'total_meters': pd.Int64Dtype(),
'total_disposition_mwh': float,
'total_energy_losses_mwh': float,
'total_sources_mwh': float,
'transmission': float,
'transmission_activity': pd.BooleanDtype(),
'transmission_by_other_losses_mwh': float,
'transmission_distribution_owner_id': pd.Int64Dtype(),
'transmission_distribution_owner_name': pd.StringDtype(),
'transmission_distribution_owner_state': pd.StringDtype(),
'turbines_inverters_hydrokinetics': float,
'turbines_num': pd.Int64Dtype(), # TODO: check if any turbines show up pre-2016
'ultrasupercritical_tech': pd.BooleanDtype(),
'unbundled_revenues': float,
'unit_id_eia': pd.StringDtype(),
'unit_id_pudl': pd.Int64Dtype(),
'uprate_derate_completed_date': 'datetime64[ns]',
'uprate_derate_during_year': pd.BooleanDtype(),
'utility_id_eia': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'utility_name_eia': pd.StringDtype(),
'utility_owned_capacity_mw': float, # Added by AES for NNM table
'variable_peak_pricing_program': pd.BooleanDtype(), # Added by AES for DP table
'virtual_capacity_mw': float, # Added by AES for NM table
'virtual_customers': pd.Int64Dtype(), # Added by AES for NM table
'water_heater': pd.Int64Dtype(), # Added by AES for DR table
'water_source': pd.StringDtype(),
'weighted_average_life_years': float,
'wheeled_power_delivered_mwh': float,
'wheeled_power_recieved_mwh': float,
'wholesale_marketing_activity': pd.BooleanDtype(),
'wholesale_power_purchases_mwh': float,
'winter_capacity_mw': float,
'winter_capacity_estimate': pd.BooleanDtype(),
'winter_estimated_capability_mw': float,
'winter_peak_demand_mw': float,
# 'with_med': float,
# 'with_med_minus_los': float,
# 'without_med': float,
'zip_code': pd.StringDtype(),
'zip_code_4': pd.StringDtype()
},
'depreciation': {
'utility_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
# 'plant_name': pd.StringDtype(),
'note': pd.StringDtype(),
'report_year': int,
'report_date': 'datetime64[ns]',
'common': pd.BooleanDtype(),
'plant_balance': float,
'book_reserve': float,
'unaccrued_balance': float,
'reserve_pct': float,
# 'survivor_curve_type': pd.StringDtype(),
'service_life_avg': float,
'net_salvage_pct': float,
'net_salvage_rate_type_pct': pd.BooleanDtype(),
'net_removal': float,
'net_removal_pct': float,
'remaining_life_avg': float,
# 'retirement_date': 'datetime64[ns]',
'depreciation_annual_epxns': float,
'depreciation_annual_pct': float,
'depreciation_annual_rate_type_pct': pd.BooleanDtype(),
# 'data_source': pd.StringDtype(),
}
}
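# --- Illustrative sketch, not part of the original constants module. ---
# The column_dtypes mapping above is typically used to coerce a dataframe to
# the declared types, restricted to the columns actually present. The helper
# name is hypothetical and shown only to illustrate that pattern.
def _example_apply_dtypes(df, data_source):
    """Cast ``df``'s columns using the dtypes declared for ``data_source``."""
    dtypes = {col: dtype for col, dtype in column_dtypes[data_source].items()
              if col in df.columns}
    return df.astype(dtypes)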
| mit |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/models/random_forest_regressor_test.py | 10 | 7504 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests the random forest functionality """
import unittest
from sparktkregtests.lib import sparktk_test
class RandomForest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build the required frame"""
super(RandomForest, self).setUp()
schema = [("feat1", int), ("feat2", int), ("class", float)]
filename = self.get_file("rand_forest_class.csv")
self.frame = self.context.frame.import_csv(filename, schema=schema)
def test_train(self):
"""Test random forest train method"""
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", seed=0)
self.assertItemsEqual(model.observation_columns, ["feat1", "feat2"])
self.assertEqual(model.label_column, "class")
self.assertEqual(model.max_bins, 100)
self.assertEqual(model.max_depth, 4)
self.assertEqual(model.num_trees, 1)
self.assertEqual(model.impurity, "variance")
self.assertEqual(model.min_instances_per_node, 1)
self.assertEqual(model.feature_subset_category, "auto")
self.assertEqual(model.seed, 0)
self.assertAlmostEqual(model.sub_sampling_rate, 1.0)
self.assertIsNone(model.categorical_features_info)
def test_predict(self):
"""Test predicted values are correct"""
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", seed=0)
result_frame = model.predict(self.frame)
preddf = result_frame.to_pandas(self.frame.count())
for index, row in preddf.iterrows():
self.assertAlmostEqual(row['class'], row['predicted_value'])
def test_negative_seed(self):
"""Test training with negative seed does not throw exception"""
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", seed=-10)
def test_bad_class_col_name(self):
"""Negative test to check behavior for bad class column"""
with self.assertRaisesRegexp(
Exception, "Invalid column name ERR provided"):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "ERR")
def test_bad_feature_col_name(self):
"""Negative test to check behavior for feature column"""
with self.assertRaisesRegexp(
Exception, ".*Invalid column name ERR provided"):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["ERR", "feat2"], "class")
def test_invalid_impurity(self):
"""Negative test for invalid impurity value"""
with self.assertRaisesRegexp(
Exception, "Only variance *"):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", impurity="gini")
def test_negative_max_bins(self):
"""Negative test for max_bins < 0"""
with self.assertRaisesRegexp(
Exception, "Found max_bins = -1. Expected non-negative integer."):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", max_bins=-1)
def test_max_bins_0(self):
"""Test for max_bins = 0; should not throw exception"""
with self.assertRaisesRegexp(
Exception,
"maxBins must be greater than 0"):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", max_bins=0)
def test_negative_max_depth(self):
"""Negative test for max_depth < 0"""
with self.assertRaisesRegexp(
Exception, "Found max_depth = -2. Expected non-negative integer."):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", max_depth=-2)
def test_max_depth_0(self):
"""Negative test for max_depth=0"""
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", max_depth=0)
        # Check predicted values for depth 0
result_frame = model.predict(self.frame)
preddf = result_frame.to_pandas(self.frame.count())
expected_pred_labels = [0.41299999999999998]*self.frame.count()
actual_pred_labels = preddf['predicted_value'].tolist()
self.assertItemsEqual(actual_pred_labels, expected_pred_labels)
def test_negative_num_trees(self):
"""Negative test for num_trees<0"""
with self.assertRaisesRegexp(
Exception, "Found num_trees = -10. Expected non-negative integer."):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", num_trees=-10)
def test_num_trees_0(self):
"""Negative test for num_trees=0"""
with self.assertRaisesRegexp(
Exception, "numTrees must be greater than 0"):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", num_trees=0)
def test_invalid_feature_subset_category(self):
"""Negative test for feature subset category"""
with self.assertRaisesRegexp(
Exception, "feature subset category"):
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class",
feature_subset_category="any")
def test_rand_forest_save(self):
"""Test save plugin"""
model = self.context.models.regression.random_forest_regressor.train(
self.frame, ["feat1", "feat2"], "class", seed=0)
path = self.get_name("test")
model.save(path + "/randomforestregressor")
restored = self.context.load(path +"/randomforestregressor")
self.assertItemsEqual(restored.observation_columns, ["feat1", "feat2"])
self.assertEqual(restored.label_column, "class")
self.assertEqual(restored.max_bins, 100)
self.assertEqual(restored.max_depth, 4)
self.assertEqual(restored.num_trees, 1)
self.assertEqual(restored.impurity, "variance")
self.assertEqual(restored.min_instances_per_node, 1)
self.assertEqual(restored.feature_subset_category, "auto")
self.assertEqual(restored.seed, 0)
self.assertAlmostEqual(restored.sub_sampling_rate, 1.0)
self.assertIsNone(restored.categorical_features_info)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
massmutual/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
    # after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when
        fitting. True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
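        # The three buffers below follow scipy's CSR layout: for row i,
        # values[indptr[i]:indptr[i+1]] holds the stored entries and
        # indices[indptr[i]:indptr[i+1]] their column (feature) indices.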
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
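# Illustrative usage sketch (not part of the upstream module): string-valued
# features are expanded by one-hot coding, numeric features pass through.
#
#     >>> v = DictVectorizer(sparse=False)
#     >>> v.fit_transform([{'f': 'ham', 'x': 2}, {'f': 'spam', 'x': 3}])
#     array([[ 1.,  0.,  2.],
#            [ 0.,  1.,  3.]])
#     >>> v.get_feature_names()
#     ['f=ham', 'f=spam', 'x']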
| bsd-3-clause |
degoldschmidt/pytrack-analysis | examples/oldstuff/broken.py | 1 | 4006 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def dfx(_ax, x, d):
x0 = _ax.get_xlim()[0]
x1 = _ax.get_xlim()[1]
xs = x1-x0
o = (x-x0)/(x1-x0)
return (o-d,o+d)
def dfy(_ax, y, d):
y0 = _ax.get_ylim()[0]
y1 = _ax.get_ylim()[1]
ys = y1-y0
o = (y-y0)/(y1-y0)
return (o-d,o+d)
def break_the_ax(f, _ax, break_at=[], scale=[1, 1]):
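    """Split ``_ax`` vertically into two stacked axes around ``break_at``.
    Description inferred from the implementation below: ``break_at`` may be a
    single value or a ``[low, high]`` pair marking the gap; ``scale`` sets the
    data units per unit height for the lower and upper parts and hence their
    relative sizes. Returns ``[lower_axis, upper_axis]``, or ``None`` if
    ``break_at`` is empty or of an unsupported type.
    """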
if type(break_at) is int or type(break_at) is float:
break0 = break_at
break1 = break_at
elif type(break_at) is list:
if len(break_at) == 0:
return None
elif len(break_at) == 1:
break0 = break_at[0]
break1 = break_at[0]
else:
break0 = break_at[0]
break1 = break_at[1]
else:
return None
start = _ax.get_ylim()[0]
end = _ax.get_ylim()[1]
ratio0 = int((break0-start)/scale[0])
ratio1 = int((end-break1)/scale[1])
perc = 100*(ratio1/ratio0)
ax_old = _ax
divider = make_axes_locatable(ax_old)
print(str(perc)+"%")
ax_new = divider.new_vertical(size=str(perc)+"%", pad=0.1)
f.add_axes(ax_new)
ax_new.spines['right'].set_visible(ax_old.spines['right'].get_visible())
ax_new.spines['top'].set_visible(ax_old.spines['top'].get_visible())
ax_old.set_ylim(start, break0)
ax_old.spines['top'].set_visible(False)
ax_new.set_ylim(break1, end)
ax_new.tick_params(bottom="off", labelbottom='off')
ax_new.spines['bottom'].set_visible(False)
for each_line in ax_old.get_lines():
x,y = each_line.get_data()
ax_new.plot(x, y)
if y[0] <= break0:
curax = 0
if y[0] >= break1:
curax = 1
pts = []
for i, allx in enumerate(x[1:]):
if y[i] <= break0:
if curax == 1:
pts.append([x[i], y[i]])
curax = 0
if y[i] >= break1:
if curax == 0:
pts.append([x[i-1], y[i-1]])
curax = 1
# From https://matplotlib.org/examples/pylab_examples/broken_axis.html
d0 = .01*scale[0] # how big to make the diagonal lines in axes coordinates
d1 = .01*scale[1]*(ratio1/ratio0)
dx = .01
# arguments to pass to plot, just so we don't keep repeating them
d = d1
kwargs = dict(transform=ax_new.transAxes, color='k', clip_on=False)
ax_new.plot((-dx, +dx), (-d, +d), **kwargs) # top-left diagonal
if ax_old.spines['right'].get_visible():
ax_new.plot((1 - dx, 1 + dx), (-d, +d), **kwargs) # top-right diagonal
d = d0
kwargs.update(transform=ax_old.transAxes) # switch to the bottom axes
ax_old.plot((-dx, +dx), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
if ax_old.spines['right'].get_visible():
ax_old.plot((1 - dx, 1 + dx), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
for ps in pts:
        ax_old.plot( dfx(ax_old, ps[0], dx), dfy(ax_old, break0, d0), 'k', clip_on=False, transform=ax_old.transAxes) # break marker on the lower axis
        ax_new.plot( dfx(ax_new, ps[0], dx), dfy(ax_new, break1, d1), 'k', clip_on=False, transform=ax_new.transAxes) # break marker on the upper axis
return [ax_old, ax_new]
"""
Random data generation
"""
np.random.seed(42)
N = 100
x = np.linspace(0,10,num=N)
A = 10
listA = []
even = np.arange(0,10,2).astype(int)
for i in range(A):
if i in even:
listA.append(np.random.uniform(0, 1, int(N/A)))
else:
listA.append(np.random.uniform(20, 25, int(N/A)))
y = np.concatenate(listA)
y2 = np.random.uniform(0, 1, N)
"""
Actual plotting
"""
f, axes = plt.subplots(nrows=2)
axes[0].plot(x, y)
axes[0].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
axes[0].set_ylim([0,25])
axes[0] = break_the_ax(f, axes[0], break_at=6, scale=[1, 5])
axes[0][1].set_yticks([10,15,20,25])
axes[0][0].set_yticks([0,1,2,3,4,5])
#create bottom subplot as usual
axes[1].plot(x, y2)
plt.show()
| gpl-3.0 |
boland1992/seissuite_iran | seissuite/sort_later/dist_gen.py | 2 | 10450 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 09:12:49 2015
@author: boland
"""
import sys
sys.path.append("/home/boland/Anaconda/lib/python2.7/site-packages")
import pickle
import fiona
import seaborn as sns
from shapely import geometry
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.vq import kmeans
import multiprocessing as mp
import pyproj
from matplotlib.colors import ColorConverter
import os
from matplotlib.colors import LinearSegmentedColormap
import itertools
import datetime
import pointshape as ps
from numba import jit
from math import sqrt, atan2, radians,degrees, cos, tan, sin, asin, acos
shape_path = "/home/boland/Dropbox/University/UniMelb/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
N = 130
#enter km spacing between path density points
km_points = 10.0
# reference elipsoid to calculate distance
wgs84 = pyproj.Geod(ellps='WGS84')
#wgs84 = pyproj.Geod(ellps='sphere')
#lat lon coordinates of random points generated within set shape file
coords = ps.points_in_shape(shape_path, N)
def dist(lons1, lats1, lons2, lats2):
"""
Returns an array of geodetic distance(s) in km between
points (lon1, lat1) and (lon2, lat2)
"""
_, _, d = wgs84.inv(lons1=lons1, lats1=lats1, lons2=lons2, lats2=lats2)
return np.array(d) / 1000.0
def new_dist(coordinates):
"""
Returns an array of geodetic distance(s) in km between
points (lon1, lat1) and (lon2, lat2)
"""
lons1=coordinates[0]
lats1=coordinates[1]
lons2=coordinates[2]
lats2=coordinates[3]
_, _, d = wgs84.inv(lons1=lons1, lats1=lats1, lons2=lons2, lats2=lats2)
return np.array(d) / 1000.0
from math import radians, cos, sin, asin, sqrt
def haversine(coordinates):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
lon1=coordinates[0]
lat1=coordinates[1]
lon2=coordinates[2]
lat2=coordinates[3]
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
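# Quick sanity check (illustrative, not in the original script):
# haversine([0.0, 0.0, 1.0, 0.0]) is ~111 km, one degree of longitude
# along the equator.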
def geodesic(coord1, coord2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=coord1[0], lat1=coord1[1],
lon2=coord2[0], lat2=coord2[1],
npts=npts-2)
return np.array([coord1] + path + [coord2])
def new_geodesic(lon1,lat1,lon2,lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def cluster_points(coord_points):
"""
    Return an array of lon-lat cluster centroid coordinates (one row per
    centroid) obtained by running k-means on a large set of candidate
    points, e.g. those generated by points_in_shape() above.
"""
k = kmeans(coord_points, 130)
return k[0]
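# Note: scipy's kmeans returns (codebook, distortion); only the centroid
# array k[0] is used here.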
lons1 = coords[:,0]; lats1 = coords[:,1]
lons2 = coords[:,0]; lats2 = coords[:,1]
lonmin = np.floor(min(lons1))
latmin = np.floor(min(lats1))
dists = []
coords1 = []
coords2 = []
t0 = datetime.datetime.now()
coords11 = [coord1 for coord1 in coords for coord2 in coords]
coords22 = [coord2 for coord1 in coords for coord2 in coords]
dist_list = [dist(coord1[0], coord1[1],coord2[0],coord2[1]) \
for coord1 in coords for coord2 in coords]
t1 = datetime.datetime.now()
print "dist list comprehension", t1-t0
t0 = datetime.datetime.now()
coords11 = [coord1 for coord1 in coords for coord2 in coords]
coords22 = [coord2 for coord1 in coords for coord2 in coords]
columns = np.column_stack((coords11, coords22))
t0 = datetime.datetime.now()
dist_list = map(new_dist, columns)
t1 = datetime.datetime.now()
print "map dists", t1-t0
t0 = datetime.datetime.now()
coords11 = [coord1 for coord1 in coords for coord2 in coords]
coords22 = [coord2 for coord1 in coords for coord2 in coords]
columns = np.column_stack((coords11, coords22))
t0 = datetime.datetime.now()
dist_list = map(haversine, columns)
t1 = datetime.datetime.now()
print "haversine", t1-t0
@jit
def distances(lats1, lats2):
for index, item in enumerate(lats1):
lat1 = item
lon1 = lons1[index]
for index2, item2 in enumerate(lats2):
lat2 = item2
lon2 = lons2[index2]
dists.append(dist(lon1, lat1, lon2, lat2))
coords1.append([lon1,lat1])
coords2.append([lon2,lat2])
return dists, coords1, coords2
t0 = datetime.datetime.now()
dists, coords1, coords2 = distances(lats1, lats2)
t1 = datetime.datetime.now()
print t1-t0
t0 = datetime.datetime.now()
path_info = zip(coords1,coords2, dists)
#path_info = np.column_stack((coords1, coords2, dists))
#del dists; del coords1; del coords2
def create_paths(path_point, km=km_points):
coord1 = path_point[0]
coord2 = path_point[1]
dist = path_point[2]
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(int((np.ceil(dist) + 1)/km), 100)
path = geodesic(coord1, coord2, npts)
#print("still going strong\n")
return path
#parallise the generation of path points for SPEED!
pool = mp.Pool()
paths = pool.map(create_paths, path_info)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print 'paths parallel create_paths', t1-t0
del path_info
def new_paths(path_info, km=km_points):
lon1, lat1, lon2, lat2, dist = path_info[0], path_info[1], path_info[2], path_info[3], path_info[4]
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(int((np.ceil(dist) + 1)/km), 100)
path = new_geodesic(lon1,lat1,lon2,lat2, npts)
#print("still going strong\n")
return path
#parallise the generation of path points for SPEED!
t0 = datetime.datetime.now()
path_info = np.column_stack((coords1, coords2, dists))
pool = mp.Pool()
paths = pool.map(new_paths, path_info)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print 'paths parallel new_paths', t1-t0
del path_info
#plt.figure()
#plt.scatter(paths[1][:,0], paths[1][:,1])
#plt.show()
def points_curve(path_info, km=km_points):
"""
    Return npts equally spaced lon-lat coordinates obtained by linear
    interpolation between the two endpoints (a fast approximation to the
    great-circle path that is only accurate over short distances).
"""
lon1 = path_info[0]
lat1 = path_info[1]
lon2 = path_info[2]
lat2 = path_info[3]
dist = path_info[4]
lat_dif = lat2-lat1
lon_dif = lon2-lon1
npts = max(int((np.ceil(dist) + 1)/km), 100)
lat_op = (lat_dif)/npts
lon_op = (lon_dif)/npts
nums = np.arange(1,npts+1,1)
latn = np.add(lat1, np.multiply(lat_op,nums))
lonn = np.add(lon1, np.multiply(lon_op,nums))
#latn = lat1 + lat_op * nums#[lat1 + n * lat_op for n in nums]
#lonn = lon1 + lon_op * nums #[lon1 + n * lon_op for n in nums]
path = np.column_stack((lonn,latn))
return path
t0 = datetime.datetime.now()
path_info = np.column_stack((coords1, coords2, dists))
pool = mp.Pool()
paths = pool.map(points_curve, path_info)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print 'paths parallel points_curve', t1-t0
del path_info
#plt.figure()
#plt.scatter(paths[1][:,0], paths[1][:,1])
#plt.show()
#from geographiclib.geodesic import Geodesic
#number_points = 10
#gd = Geodesic.WGS84.Inverse(35, 0, 35, 90)
#line = Geodesic.WGS84.Line(gd['lat1'], gd['lon1'], gd['azi1'])
#for i in range(number_points + 1):
# point = line.Position(gd['s12'] / number_points * i)
# print((point['lat2'], point['lon2']))
#t0 = datetime.datetime.now()
#paths_info = np.column_stack((coords1, coords2, dists))
#paths = map(new_paths, paths_info)
#t1 = datetime.datetime.now()
#print 'paths map', t1-t0
def latitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lat = degrees(asin(cos(alpha0)*sin(sigma)))
#alpha = atan2(tan(alpha0),cos(sigma))
return lat
def longitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lon = degrees(atan2(sin(alpha0)*sin(sigma), cos(sigma))) + degrees(lon0)
#alpha = atan2(tan(alpha0),cos(sigma))
return lon
vlat_func = np.vectorize(latitude)
vlon_func = np.vectorize(longitude)
def waypoint_init(path_info, km=km_points):
R = 6371
lon1, lat1, lon2, lat2, dist = radians(path_info[0]), \
radians(path_info[1]), radians(path_info[2]), \
radians(path_info[3]), radians(path_info[4])
#lon1, lat1, lon2, lat2, dist = map(radians, [path_info[0],path_info[1],path_info[2],path_info[3],path_info[4]])
lon_diff = lon2-lon1
alpha1 = atan2(sin(lon_diff),(cos(lat1)*tan(lat2)-sin(lat1)*cos(lon_diff)))
#alpha2 = atan2(sin(lon_diff),(-cos(lat2)*tan(lat1)+sin(lat2)*cos(lon_diff)))
#try:
#sigma12 = acos(sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(lon_diff))
#except:
#return
sigma01, alpha0 = atan2(tan(lat1), cos(alpha1)), asin(sin(alpha1)*cos(lat1))
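    # alpha1 is the initial bearing from point 1 towards point 2, alpha0 the
    # azimuth where the great circle crosses the equator (Clairaut's relation),
    # and sigma01 the arc length from that equator crossing to point 1 (the
    # standard direct-geodesic quantities for a sphere).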
#sigma02 = sigma01+sigma12
lon01 = atan2(sin(alpha0)*sin(sigma01), cos(sigma01))
lon0 = lon1 - lon01
npts = max(int((np.ceil(dist) + 1)/km), 100)
all_d = np.linspace(0,dist,npts)/R
lons, lats = vlon_func(all_d, sigma01, alpha0, lon0), vlat_func(all_d, sigma01, alpha0, lon0)
return np.column_stack((lons, lats))
t0 = datetime.datetime.now()
path_info = np.column_stack((coords1, coords2, dists))
pool = mp.Pool()
paths = pool.map(waypoint_init, path_info)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print "waypoints", t1-t0
#plt.figure()
#plt.scatter(coords[:,0], coords[:,1])
#plt.show()
#parallise the generation of path points for SPEED!
| gpl-3.0 |
davidgbe/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and so are the
corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced by
P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking and clustering.
For visualization purposes, the cube root of the Mahalanobis distances
is shown in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
OSCAAR/OSCAAR | oscaar/extras/eph/calculateEphemerides.py | 2 | 37621 | '''
Ephemeris calculating tool that uses transit data from exoplanets.org
and astrometric calculations by PyEphem to tell you what transits you'll
be able to observe from your observatory in the near future.
Exoplanets.org citation: Wright et al. 2011
http://arxiv.org/pdf/1012.5676v3.pdf
Core developer: Brett Morris
'''
import ephem ## PyEphem module
import numpy as np
#from ephemeris import gd2jd, jd2gd
from matplotlib import pyplot as plt
from glob import glob
from os import getcwd, sep
from time import time
import os.path
import oscaar
import sys
def calculateEphemerides(parFile):
'''
:INPUTS:
parFile -- path to the parameter file
'''
#parFile = 'umo.par'
'''Parse the observatory .par file'''
parFileText = open(os.path.join(os.path.dirname(oscaar.__file__),'extras','eph','observatories',parFile),'r').read().splitlines()
def returnBool(value):
'''Return booleans from strings'''
if value.upper().strip() == 'TRUE': return True
elif value.upper().strip() == 'FALSE': return False
if hasattr(sys, 'real_prefix'):
show_lt = float(0)
for line in parFileText:
parameter = line.split(':')[0]
if len(line.split(':')) > 1:
value = line.split(':')[1].strip()
if parameter == 'name': observatory_name = value
elif parameter == 'latitude': observatory_latitude = value
elif parameter == 'longitude': observatory_longitude = value
elif parameter == 'elevation': observatory_elevation = float(value)
elif parameter == 'temperature': observatory_temperature = float(value)
elif parameter == 'min_horizon': observatory_minHorizon = value
elif parameter == 'start_date': startSem = gd2jd(eval(value))
elif parameter == 'end_date': endSem = gd2jd(eval(value))
elif parameter == 'mag_limit': mag_limit = float(value)
elif parameter == 'band': band = value
elif parameter == 'depth_limit': depth_limit = float(value)
elif parameter == 'calc_transits': calcTransits = returnBool(value)
elif parameter == 'calc_eclipses': calcEclipses = returnBool(value)
elif parameter == 'html_out': htmlOut = returnBool(value)
elif parameter == 'text_out': textOut = returnBool(value)
elif parameter == 'twilight': twilightType = value
elif parameter == 'show_lt': show_lt = float(value)
from oscaar.extras.knownSystemParameters import getLatestParams
exoplanetDB = getLatestParams.downloadAndPickle()
''' Set up observatory parameters '''
observatory = ephem.Observer()
observatory.lat = observatory_latitude#'38:58:50.16' ## Input format- deg:min:sec (type=str)
observatory.long = observatory_longitude#'-76:56:13.92' ## Input format- deg:min:sec (type=str)
observatory.elevation = observatory_elevation # m
observatory.temp = observatory_temperature ## Celsius
observatory.horizon = observatory_minHorizon ## Input format- deg:min:sec (type=str)
def trunc(f, n):
'''Truncates a float f to n decimal places without rounding'''
slen = len('%.*f' % (n, f))
return str(f)[:slen]
def RA(planet):
'''Type: str, Units: hours:min:sec'''
return exoplanetDB[planet]['RA_STRING']
def dec(planet):
'''Type: str, Units: deg:min:sec'''
return exoplanetDB[planet]['DEC_STRING']
def period(planet):
'''Units: days'''
return np.float64(exoplanetDB[planet]['PER'])
def epoch(planet):
'''Tc at mid-transit. Units: days'''
if exoplanetDB[planet]['TT'] == '': return 0.0
else: return np.float64(exoplanetDB[planet]['TT'])
def duration(planet):
'''Transit/eclipse duration. Units: days'''
if exoplanetDB[planet]['T14'] == '': return 0.0
else: return float(exoplanetDB[planet]['T14'])
def V(planet):
'''V mag'''
if exoplanetDB[planet]['V'] == '': return 0.0
else: return float(exoplanetDB[planet]['V'])
def KS(planet):
'''KS mag'''
if exoplanetDB[planet]['KS'] == '': return 0.0
else: return float(exoplanetDB[planet]['KS'])
def bandMagnitude(planet):
if band.upper() == 'V':
return V(planet)
elif band.upper() == 'K':
return KS(planet)
def depth(planet):
'''Transit depth'''
if exoplanetDB[planet]['DEPTH'] == '': return 0.0
else: return float(exoplanetDB[planet]['DEPTH'])
def transitBool(planet):
'''True if exoplanet is transiting, False if detected by other means'''
if exoplanetDB[planet]['TRANSIT'] == '0': return 0
elif exoplanetDB[planet]['TRANSIT'] == '1': return 1
########################################################################################
########################################################################################
def datestr2list(datestr):
''' Take strings of the form: "2013/1/18 20:08:18" and return them as a
tuple of the same parameters'''
year,month,others = datestr.split('/')
day, time = others.split(' ')
hour,minute,sec = time.split(':')
return (int(year),int(month),int(day),int(hour),int(minute),int(sec))
def list2datestr(inList):
'''Converse function to datestr2list'''
inList = map(str,inList)
return inList[0]+'/'+inList[1]+'/'+inList[2]+' '+inList[3].zfill(2)+':'+inList[4].zfill(2)+':'+inList[5].zfill(2)
def list2datestrCSV(inList):
'''Converse function to datestr2list'''
inList = map(str,inList)
        #print inList
return inList[0]+'/'+inList[1]+'/'+inList[2]+','+inList[3].zfill(2)+':'+inList[4].zfill(2)+':'+inList[5].zfill(2)
def list2datestrHTML(inList,alt,direction):
'''Converse function to datestr2list'''
inList = map(str,inList)
#return inList[1].zfill(2)+'/'+inList[2].zfill(2)+'<br />'+inList[3].zfill(2)+':'+inList[4].zfill(2)
return inList[1].zfill(2)+'/<strong>'+inList[2].zfill(2)+'</strong>, '+inList[3].zfill(2)+':'+inList[4].split('.')[0].zfill(2)+'<br /> '+alt+'° '+direction
def list2datestrHTML_UTnoaltdir(inList,alt,direction):
'''Converse function to datestr2list'''
inList = map(str,inList)
#return inList[1].zfill(2)+'/'+inList[2].zfill(2)+'<br />'+inList[3].zfill(2)+':'+inList[4].zfill(2)
return inList[1].zfill(2)+'/<strong>'+inList[2].zfill(2)+'</strong>, '+inList[3].zfill(2)+':'+inList[4].split('.')[0].zfill(2)
def list2datestrHTML_LT(inList,alt,direction):
'''Converse function to datestr2list for daylight savings time'''
#print "original",inList
tempDate = ephem.Date(inList)
inList = ephem.Date(ephem.localtime(tempDate)).tuple()
#print "converted",lt_inList,'\n'
inList = map(str,inList)
#return inList[1].zfill(2)+'/'+inList[2].zfill(2)+'<br />'+inList[3].zfill(2)+':'+inList[4].zfill(2)
return inList[1].zfill(2)+'/<strong>'+inList[2].zfill(2)+'</strong>, '+inList[3].zfill(2)+':'+inList[4].split('.')[0].zfill(2)+'<br /> '+alt+'° '+direction
def simbadURL(planet):
if exoplanetDB[planet]['SIMBADURL'] == '': return 'http://simbad.harvard.edu/simbad/'
else: return exoplanetDB[planet]['SIMBADURL']
def RADecHTML(planet):
return '<a href="'+simbadURL(planet)+'">'+RA(planet).split('.')[0]+'<br />'+dec(planet).split('.')[0]+'</a>'
def constellation(planet):
return exoplanetDB[planet]['Constellation']
def orbitReference(planet):
return exoplanetDB[planet]['TRANSITURL']
def orbitReferenceYear(planet):
'''ORBREF returns the citation in the format "<first author> <year>", so parse and return just the year'''
return exoplanetDB[planet]['ORBREF'].split()[1]
def nameWithLink(planet):
return '<a href="'+orbitReference(planet)+'">'+planet+'</a>'
def mass(planet):
if exoplanetDB[planet]['MASS'] == '': return '---'
else: return trunc(float(exoplanetDB[planet]['MASS']),2)
def semimajorAxis(planet):
#return trunc(0.004649*float(exoplanetDB[planet]['AR'])*float(exoplanetDB[planet]['RSTAR']),3) ## Convert from solar radii to AU
return trunc(float(exoplanetDB[planet]['SEP']),3)
def radius(planet):
if exoplanetDB[planet]['R'] == '': return '---'
        else: return trunc(float(exoplanetDB[planet]['R']),2)
def midTransit(Tc, P, start, end):
'''Calculate mid-transits between Julian Dates start and end, using a 2500
orbital phase kernel since T_c (for 2 day period, 2500 phases is 14 years)
'''
Nepochs = np.arange(0,2500,dtype=np.float64)
transitTimes = Tc + P*Nepochs
transitTimesInSem = transitTimes[(transitTimes < end)*(transitTimes > start)]
return transitTimesInSem
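    # Example (illustrative): midTransit(2456000.0, 3.0, 2456010.0, 2456020.0)
    # yields the mid-transit JDs 2456012.0, 2456015.0 and 2456018.0, i.e. the
    # epochs that fall strictly between the start and end dates.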
def midEclipse(Tc, P, start, end):
'''Calculate mid-eclipses between Julian Dates start and end, using a 2500
orbital phase kernel since T_c (for 2 day period, 2500 phases is 14 years)
'''
Nepochs = np.arange(0,2500,dtype=np.float64)
transitTimes = Tc + P*(0.5 + Nepochs)
transitTimesInSem = transitTimes[(transitTimes < end)*(transitTimes > start)]
return transitTimesInSem
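    # Note: placing the eclipse at phase 0.5 assumes a circular orbit;
    # eccentric planets have secondary eclipses offset from this prediction.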
'''Choose which planets from the database to include in the search,
assemble a list of them.'''
planets = []
for planet in exoplanetDB:
if bandMagnitude(planet) != 0.0 and depth(planet) != 0.0 and float(bandMagnitude(planet)) <= mag_limit and \
float(depth(planet)) >= depth_limit and transitBool(planet):
planets.append(planet)
if calcTransits: transits = {}
if calcEclipses: eclipses = {}
for day in np.arange(startSem,endSem+1):
if calcTransits: transits[str(day)] = []
if calcEclipses: eclipses[str(day)] = []
planetsNeverUp = []
def azToDirection(az):
az = float(az)
if (az >= 0 and az < 22.5) or (az >= 337.5 and az < 360): return 'N'
elif az >= 22.5 and az < 67.5: return 'NE'
elif az >= 67.5 and az < 112.5: return 'E'
elif az >= 112.5 and az < 157.5: return 'SE'
elif az >= 157.5 and az < 202.5: return 'S'
elif az >= 202.5 and az < 247.5: return 'SW'
elif az >= 247.5 and az < 292.5: return 'W'
elif az >= 292.5 and az < 337.5: return 'NW'
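    # e.g. azToDirection(45.0) -> 'NE', azToDirection(350.0) -> 'N'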
def ingressEgressAltAz(planet,observatory,ingress,egress):
altitudes = []
directions = []
for time in [ingress,egress]:
observatory.date = list2datestr(jd2gd(time))
star = ephem.FixedBody()
star._ra = ephem.hours(RA(planet))
star._dec = ephem.degrees(dec(planet))
star.compute(observatory)
altitudes.append(str(ephem.degrees(star.alt)).split(":")[0])
directions.append(azToDirection(str(ephem.degrees(star.az)).split(":")[0]))
ingressAlt,egressAlt = altitudes
ingressDir,egressDir = directions
return ingressAlt,ingressDir,egressAlt,egressDir
def aboveHorizonForEvent(planet,observatory,ingress,egress):
altitudes = []
for time in [ingress,egress]:
observatory.date = list2datestr(jd2gd(time))
star = ephem.FixedBody()
star._ra = ephem.hours(RA(planet))
star._dec = ephem.degrees(dec(planet))
star.compute(observatory)
#altitudes.append(str(ephem.degrees(star.alt)).split(":")[0])
altitudes.append(float(repr(star.alt))/(2*np.pi) * 360) ## Convert altitudes to degrees
#if altitudes[0] > 0 and altitudes[1] > 0: return True
if altitudes[0] > float(ephem.degrees(observatory_minHorizon))*(180/np.pi) and altitudes[1] > float(ephem.degrees(observatory_minHorizon))*(180/np.pi): return True
else: return False
def eventAfterTwilight(planet,observatory,ingress,egress,twilightType):
altitudes = []
for time in [ingress,egress]:
observatory.date = list2datestr(jd2gd(time))
sun = ephem.Sun()
sun.compute(observatory)
altitudes.append(float(repr(sun.alt))/(2*np.pi) * 360) ## Convert altitudes to degrees
if altitudes[0] < float(twilightType) and altitudes[1] < float(twilightType): return True
else: return False
for planet in planets:
'''Compute all of the coming transits and eclipses for a long time out'''
allTransitEpochs = midTransit(epoch(planet),period(planet),startSem,endSem)
allEclipseEpochs = midEclipse(epoch(planet),period(planet),startSem,endSem)
for day in np.arange(startSem,endSem+1,1.0):
try:
'''For each day, gather the transits and eclipses that happen'''
transitEpochs = allTransitEpochs[(allTransitEpochs <= day+0.5)*(allTransitEpochs > day-0.5)]
eclipseEpochs = allEclipseEpochs[(allEclipseEpochs <= day+0.5)*(allEclipseEpochs > day-0.5)]
if calcTransits and len(transitEpochs) != 0:
transitEpoch = transitEpochs[0]
ingress = transitEpoch-duration(planet)/2
egress = transitEpoch+duration(planet)/2
''' Calculate positions of host stars'''
star = ephem.FixedBody()
star._ra = ephem.hours(RA(planet))
star._dec = ephem.degrees(dec(planet))
star.compute(observatory)
exoplanetDB[planet]['Constellation'] = ephem.constellation(star)[0]
'''If star is above horizon and sun is below horizon during transit/eclipse:'''
if aboveHorizonForEvent(planet,observatory,ingress,egress) and eventAfterTwilight(planet,observatory,ingress,egress,twilightType):
ingressAlt,ingressDir,egressAlt,egressDir = ingressEgressAltAz(planet,observatory,ingress,egress)
transitInfo = [planet,transitEpoch,duration(planet)/2,'transit',ingressAlt,ingressDir,egressAlt,egressDir]
transits[str(day)].append(transitInfo)
if calcEclipses and len(eclipseEpochs) != 0:
eclipseEpoch = eclipseEpochs[0]
ingress = eclipseEpoch-duration(planet)/2
egress = eclipseEpoch+duration(planet)/2
''' Calculate positions of host stars'''
star = ephem.FixedBody()
star._ra = ephem.hours(RA(planet))
star._dec = ephem.degrees(dec(planet))
star.compute(observatory)
exoplanetDB[planet]['Constellation'] = ephem.constellation(star)[0]
if aboveHorizonForEvent(planet,observatory,ingress,egress) and eventAfterTwilight(planet,observatory,ingress,egress,twilightType):
ingressAlt,ingressDir,egressAlt,egressDir = ingressEgressAltAz(planet,observatory,ingress,egress)
eclipseInfo = [planet,eclipseEpoch,duration(planet)/2,'eclipse',ingressAlt,ingressDir,egressAlt,egressDir]
eclipses[str(day)].append(eclipseInfo)
except ephem.NeverUpError:
if str(planet) not in planetsNeverUp:
print 'Note: planet %s is never above the horizon at this observing location.' % (planet)
planetsNeverUp.append(str(planet))
def removeEmptySets(dictionary):
'''Remove days where there were no transits/eclipses from the transit/eclipse list dictionary.
Can't iterate through the transits dictionary with a for loop because it would change length
as keys get deleted, so loop through with while loop until all entries are not empty sets'''
dayCounter = startSem
while any(dictionary[day] == [] for day in dictionary):
if dictionary[str(dayCounter)] == []:
del dictionary[str(dayCounter)]
dayCounter += 1
if calcTransits: removeEmptySets(transits)
if calcEclipses: removeEmptySets(eclipses)
events = {}
def mergeDictionaries(dict):
for key in dict:
if any(key == eventKey for eventKey in events) == False: ## If key does not exist in events,
if np.shape(dict[key])[0] == 1: ## If new event is the only one on that night, add only it
events[key] = [dict[key][0]]
else: ## If there were multiple events that night, add them each
events[key] = []
for event in dict[key]:
events[key].append(event)
else:
if np.shape(dict[key])[0] > 1: ## If there are multiple entries to append,
for event in dict[key]:
events[key].append(event)
else: ## If there is only one to add,
events[key].append(dict[key][0])
if calcTransits: mergeDictionaries(transits)
if calcEclipses: mergeDictionaries(eclipses)
if textOut:
allKeys = events.keys()
allKeys = np.array(allKeys)[np.argsort(allKeys)]
report = open(os.path.join(os.path.dirname(oscaar.__file__),'extras','eph','ephOutputs','eventReport.csv'),'w')
firstLine = 'Planet,Event,Ingress Date, Ingress Time (UT) ,Altitude at Ingress,Azimuth at Ingress,Egress Date, Egress Time (UT) ,Altitude at Egress,Azimuth at Egress,V mag,Depth,Duration,RA,Dec,Const.,Mass,Semimajor Axis (AU),Radius (R_J)\n'
report.write(firstLine)
for key in allKeys:
def writeCSVtransit():
middle = ','.join([planet[0],str(planet[3]),list2datestrCSV(jd2gd(float(planet[1]-planet[2]))),planet[4],planet[5],\
list2datestrCSV(jd2gd(float(planet[1]+planet[2]))),planet[6],planet[7],trunc(bandMagnitude(str(planet[0])),2),\
trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RA(planet[0]),dec(planet[0]),constellation(planet[0]),\
mass(planet[0]),semimajorAxis(planet[0]),radius(planet[0])])
line = middle+'\n'
report.write(line)
def writeCSVeclipse():
middle = ','.join([planet[0],str(planet[3]),list2datestrCSV(jd2gd(float(planet[1]-planet[2]))),planet[4],planet[5],\
list2datestrCSV(jd2gd(float(planet[1]+planet[2]))),planet[6],planet[7],trunc(bandMagnitude(str(planet[0])),2),\
trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RA(planet[0]),dec(planet[0]),constellation(planet[0]),\
mass(planet[0]),semimajorAxis(planet[0]),radius(planet[0])])
line = middle+'\n'
report.write(line)
if np.shape(events[key])[0] > 1:
elapsedTime = []
for i in range(1,len(events[key])):
nextPlanet = events[key][1]
planet = events[key][0]
double = False
'''If the other planet's ingress is before this one's egress, then'''
if ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) -\
ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))) > 0.0:
double = True
elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) - \
ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))))
if ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))) > 0.0:
'''If the other planet's egress is before this one's ingress, then'''
double = True
elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))))
for planet in events[key]:
if calcTransits and planet[3] == 'transit':
writeCSVtransit()
if calcEclipses and planet[3] == 'eclipse':
writeCSVeclipse()
elif np.shape(events[key])[0] == 1:
planet = events[key][0]
if calcTransits and planet[3] == 'transit':
writeCSVtransit()
if calcEclipses and planet[3] == 'eclipse':
writeCSVeclipse()
# report.write('\n')
report.close()
#print exoplanetDB['HD 209458 b']
print 'calculateEphemerides.py: Done'
if htmlOut:
'''Write out a text report with the transits/eclipses. Write out the time of
ingress, egress, whether event is transit/eclipse, elapsed in time between
ingress/egress of the temporally isolated events'''
report = open(os.path.join(os.path.dirname(oscaar.__file__),'extras','eph','ephOutputs','eventReport.html'),'w')
allKeys = events.keys()
## http://www.kryogenix.org/code/browser/sorttable/
htmlheader = '\n'.join([
'<!doctype html>',\
'<html>',\
' <head>',\
' <meta http-equiv="content-type" content="text/html; charset=UTF-8" />',\
' <title>Ephemeris</title>',\
' <link rel="stylesheet" href="stylesheetEphem.css" type="text/css" />',\
' <script type="text/javascript">',\
' function changeCSS(cssFile, cssLinkIndex) {',\
' var oldlink = document.getElementsByTagName("link").item(cssLinkIndex);',\
' var newlink = document.createElement("link")',\
' newlink.setAttribute("rel", "stylesheet");',\
' newlink.setAttribute("type", "text/css");',\
' newlink.setAttribute("href", cssFile);',\
' document.getElementsByTagName("head").item(0).replaceChild(newlink, oldlink);',\
' }',\
' </script>',\
' <script src="./sorttable.js"></script>',\
' </head>',\
' <body>',\
' <div id="textDiv">',\
' <h1>Ephemerides for: '+observatory_name+'</h1>',\
' <h2>Observing dates (UT): '+list2datestr(jd2gd(startSem)).split(' ')[0]+' - '+list2datestr(jd2gd(endSem)).split(' ')[0]+'</h2>'
' Click the column headers to sort. ',\
' <table class="daynight" id="eph">',\
' <tr><th colspan=2>Toggle Color Scheme</th></tr>',\
' <tr><td><a href="#" onclick="changeCSS(\'stylesheetEphem.css\', 0);">Day</a></td><td><a href="#" onclick="changeCSS(\'stylesheetEphemDark.css\', 0);">Night</a></td></tr>',\
' </table>'])
if show_lt == 0:
tableheader = '\n'.join([
'\n <table class="sortable" id="eph">',\
' <tr> <th>Planet<br /><span class="small">[Link: Orbit ref.]</span></th> <th>Event<br /><span class="small">[Transit/<br />Eclipse]</span></th> <th>Ingress <br /><span class="small">(MM/DD<br />HH:MM, UT)</span></th> <th>Egress <br /><span class="small">(MM/DD<br />HH:MM, (UT), Alt., Dir.)</span></th>'+\
'<th>'+band.upper()+'</th> <th>Depth<br />(mag)</th> <th>Duration<br />(hrs)</th> <th>RA/Dec<br /><span class="small">[Link: Simbad ref.]</span></th> <th>Const.</th> <th>Mass<br />(M<sub>J</sub>)</th>'+\
'<th>Radius<br />(R<sub>J</sub>)</th> <th>Ref. Year</th></tr>'])
else:
tableheader = '\n'.join([
'\n <table class="sortable" id="eph">',\
' <tr> <th>Planet<br /><span class="small">[Link: Orbit ref.]</span></th> <th>Event<br /><span class="small">[Transit/<br />Eclipse]</span></th> <th>Ingress <br /><span class="small">(MM/DD<br />HH:MM (LT), Alt., Dir.)</span></th> <th>Egress <br /><span class="small">(MM/DD<br />HH:MM (LT), Alt., Dir.)</span></th> '+\
'<th>'+band.upper()+'</th> <th>Depth<br />(mag)</th> <th>Duration<br />(hrs)</th> <th>RA/Dec<br /><span class="small">[Link: Simbad ref.]</span></th> <th>Const.</th> <th>Mass<br />(M<sub>J</sub>)</th>'+\
' <th>Radius<br />(R<sub>J</sub>)</th> <th>Ref. Year</th> <th>Ingress <br /><span class="small">(MM/DD<br />HH:MM (UT))</span></th> <th>Egress <br /><span class="small">(MM/DD<br />HH:MM, (UT))</span></th></tr>'])
tablefooter = '\n'.join([
'\n </table>',\
' <br /><br />',])
htmlfooter = '\n'.join([
'\n <p class="headinfo">',\
' Developed by Brett Morris with great gratitude for the help of <a href="http://rhodesmill.org/pyephem/">PyEphem</a>,<br/>',\
' and for up-to-date exoplanet parameters from <a href="http://www.exoplanets.org/">exoplanets.org</a> (<a href="http://adsabs.harvard.edu/abs/2011PASP..123..412W">Wright et al. 2011</a>).<br />',\
' </p>',\
' </div>',\
' </body>',\
'</html>'])
report.write(htmlheader)
report.write(tableheader)
allKeys = np.array(allKeys)[np.argsort(allKeys)]
for key in allKeys:
def writeHTMLtransit():
indentation = ' '
if show_lt != 0:
middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML_LT(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
list2datestrHTML_LT(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0]),list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7])])
else:
middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
list2datestrHTML(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
trunc(depth(planet[0]),4),trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0])])
line = indentation+'<tr><td>'+middle+'</td></tr>\n'
report.write(line)
def writeHTMLeclipse():
indentation = ' '
if show_lt != 0:
middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML_LT(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
list2datestrHTML_LT(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
'---',trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0]),list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
list2datestrHTML_UTnoaltdir(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7])])
else:
middle = '</td><td>'.join([nameWithLink(planet[0]),str(planet[3]),list2datestrHTML(jd2gd(float(planet[1]-planet[2])),planet[4],planet[5]),\
list2datestrHTML(jd2gd(float(planet[1]+planet[2])),planet[6],planet[7]),trunc(bandMagnitude(str(planet[0])),2),\
'---',trunc(24.0*duration(planet[0]),2),RADecHTML(planet[0]),constellation(planet[0]),\
mass(planet[0]),radius(planet[0]),orbitReferenceYear(planet[0])])
line = indentation+'<tr><td>'+middle+'</td></tr>\n'
report.write(line)
if np.shape(events[key])[0] > 1:
elapsedTime = []
for i in range(1,len(events[key])):
nextPlanet = events[key][1]
planet = events[key][0]
double = False
'''If the other planet's ingress is before this one's egress, then'''
if ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) -\
ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))) > 0.0:
double = True
elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]-nextPlanet[2])))) - \
ephem.Date(list2datestr(jd2gd(float(planet[1]+planet[2])))))
if ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))) > 0.0:
'''If the other planet's egress is before this one's ingress, then'''
double = True
elapsedTime.append(ephem.Date(list2datestr(jd2gd(float(planet[1]-planet[2])))) - \
ephem.Date(list2datestr(jd2gd(float(nextPlanet[1]+nextPlanet[2])))))
for planet in events[key]:
if calcTransits and planet[3] == 'transit':
writeHTMLtransit()
if calcEclipses and planet[3] == 'eclipse':
writeHTMLeclipse()
elif np.shape(events[key])[0] == 1:
planet = events[key][0]
if calcTransits and planet[3] == 'transit':
writeHTMLtransit()
if calcEclipses and planet[3] == 'eclipse':
writeHTMLeclipse()
report.write(tablefooter)
report.write(htmlfooter)
report.close()
#print exoplanetDB['HD 209458 b']
"""
Functions for handling dates.
Contains:
gd2jd -- converts gregorian date to julian date
jd2gd -- converts julian date to gregorian date
Wish list:
Function to convert heliocentric julian date!
These functions were taken from Enno Middleberg's site of useful
astronomical python references:
http://www.astro.rub.de/middelberg/python/python.html
"Feel free to download, use, modify and pass on these scripts, but
please do not remove my name from it." --E. Middleberg
"""
# 2009-02-15 13:12 IJC: Converted to importable function
def gd2jd(*date):
"""
gd2jd.py converts a UT Gregorian date to Julian date.
Usage: gd2jd.py (2009, 02, 25, 01, 59, 59)
To get the current Julian date:
import time
gd2jd(time.gmtime())
    Hours, minutes and/or seconds can be omitted -- if so, they are
assumed to be zero.
Year and month are converted to type INT, but all others can be
type FLOAT (standard practice would suggest only the final element
of the date should be float)
"""
#print date
#print date[0]
date = date[0]
date = list(date)
if len(date)<3:
print "You must enter a date of the form (2009, 02, 25)!"
return -1
elif len(date)==3:
for ii in range(3): date.append(0)
elif len(date)==4:
for ii in range(2): date.append(0)
elif len(date)==5:
date.append(0)
yyyy = int(date[0])
mm = int(date[1])
dd = float(date[2])
hh = float(date[3])
minutes = float(date[4])
sec = float(date[5])
#print yyyy,mm,dd,hh,minutes,sec
UT=hh+minutes/60+sec/3600
#print "UT="+`UT`
total_seconds=hh*3600+minutes*60+sec
fracday=total_seconds/86400
#print "Fractional day: %f" % fracday
# print dd,mm,yyyy, hh,minutes,sec, UT
if (100*yyyy+mm-190002.5)>0:
sig=1
else:
sig=-1
JD = 367*yyyy - int(7*(yyyy+int((mm+9)/12))/4) + int(275*mm/9) + dd + 1721013.5 + UT/24 - 0.5*sig +0.5
months=["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
#print "\n"+months[mm-1]+" %i, %i, %i:%i:%i UT = JD %f" % (dd, yyyy, hh, minutes, sec, JD),
# Now calculate the fractional year. Do we have a leap year?
daylist=[31,28,31,30,31,30,31,31,30,31,30,31]
daylist2=[31,29,31,30,31,30,31,31,30,31,30,31]
if (yyyy%4 != 0):
        days=daylist
elif (yyyy%400 == 0):
days=daylist2
elif (yyyy%100 == 0):
days=daylist
else:
days=daylist2
daysum=0
for y in range(mm-1):
daysum=daysum+days[y]
daysum=daysum+dd-1+UT/24
if days[1]==29:
fracyear=yyyy+daysum/366
else:
fracyear=yyyy+daysum/365
#print " = " + `fracyear`+"\n"
return JD
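# Quick check (illustrative): gd2jd((2000, 1, 1, 12, 0, 0)) returns 2451545.0,
# the standard J2000.0 epoch.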
def jd2gd(jd,returnString=False):
"""Task to convert a list of julian dates to gregorian dates
description at http://mathforum.org/library/drmath/view/51907.html
Original algorithm in Jean Meeus, "Astronomical Formulae for
Calculators"
2009-02-15 13:36 IJC: Converted to importable, callable function
Note from author: This script is buggy and reports Julian dates which are
off by a day or two, depending on how far back you go. For example, 11 March
1609 converted to JD will be off by two days. 20th and 21st century seem to
be fine, though.
Note from Brett Morris: This conversion routine matches up to the "Numerical
Recipes" in C version from 2010-2100 CE, so I think we'll be ok for oscaar's
purposes.
"""
jd=jd+0.5
Z=int(jd)
F=jd-Z
alpha=int((Z-1867216.25)/36524.25)
A=Z + 1 + alpha - int(alpha/4)
B = A + 1524
C = int( (B-122.1)/365.25)
D = int( 365.25*C )
E = int( (B-D)/30.6001 )
dd = B - D - int(30.6001*E) + F
if E<13.5:
mm=E-1
if E>13.5:
mm=E-13
if mm>2.5:
yyyy=C-4716
if mm<2.5:
yyyy=C-4715
months=["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
daylist=[31,28,31,30,31,30,31,31,30,31,30,31]
daylist2=[31,29,31,30,31,30,31,31,30,31,30,31]
h=int((dd-int(dd))*24)
minutes=int((((dd-int(dd))*24)-h)*60)
sec=86400*(dd-int(dd))-h*3600-minutes*60
# Now calculate the fractional year. Do we have a leap year?
if (yyyy%4 != 0):
        days=daylist
elif (yyyy%400 == 0):
days=daylist2
elif (yyyy%100 == 0):
days=daylist
else:
days=daylist2
hh = 24.0*(dd % 1.0)
minutes = 60.0*(hh % 1.0)
sec = 60.0*(minutes % 1.0)
dd = int(dd-(dd%1.0))
hh = int(hh-(hh%1.0))
minutes = int(minutes-(minutes%1.0))
#print str(jd)+" = "+str(months[mm-1])+ ',' + str(dd) +',' +str(yyyy)
#print str(h).zfill(2)+":"+str(minutes).zfill(2)+":"+str(sec).zfill(2)+" UTC"
#print (yyyy, mm, dd, hh, minutes, sec)
if returnString:
return str(yyyy)+'-'+str(mm).zfill(2)+'-'+str(dd).zfill(2)+' '+str(hh).zfill(2)+':'+str(minutes).zfill(2)#+':'+str(sec)[0:2].zfill(2)
else:
return (yyyy, mm, dd, hh, minutes, sec)
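# Round-trip check (illustrative): jd2gd(2451545.0) returns
# (2000, 1, 1, 12, 0, 0.0), i.e. 2000 January 1 at 12:00 UT.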
| mit |
dungvtdev/upsbayescpm | bayespy/demos/stochastic_inference.py | 5 | 5013 | ################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Stochastic variational inference on mixture of Gaussians
Stochastic variational inference is a scalable variational Bayesian
learning method which utilizes stochastic gradient. For details, see
:cite:`Hoffman:2013`.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy.nodes import Gaussian, Categorical, Mixture, Dirichlet
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
from bayespy.demos import pca
def run(N=100000, N_batch=50, seed=42, maxiter=100, plot=True):
"""
    Run stochastic variational inference demo for a mixture of Gaussians.
"""
if seed is not None:
np.random.seed(seed)
# Number of clusters in the model
K = 20
# Dimensionality of the data
D = 5
# Generate data
K_true = 10
spread = 5
means = spread * np.random.randn(K_true, D)
z = random.categorical(np.ones(K_true), size=N)
data = np.empty((N,D))
for n in range(N):
data[n] = means[z[n]] + np.random.randn(D)
#
# Standard VB-EM algorithm
#
# Full model
mu = Gaussian(np.zeros(D), np.identity(D),
plates=(K,),
name='means')
alpha = Dirichlet(np.ones(K),
name='class probabilities')
Z = Categorical(alpha,
plates=(N,),
name='classes')
Y = Mixture(Z, Gaussian, mu, np.identity(D),
name='observations')
# Break symmetry with random initialization of the means
mu.initialize_from_random()
# Put the data in
Y.observe(data)
# Run inference
Q = VB(Y, Z, mu, alpha)
Q.save(mu)
Q.update(repeat=maxiter)
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')
max_cputime = np.sum(Q.cputime[~np.isnan(Q.cputime)])
#
# Stochastic variational inference
#
# Construct smaller model (size of the mini-batch)
mu = Gaussian(np.zeros(D), np.identity(D),
plates=(K,),
name='means')
alpha = Dirichlet(np.ones(K),
name='class probabilities')
Z = Categorical(alpha,
plates=(N_batch,),
plates_multiplier=(N/N_batch,),
name='classes')
Y = Mixture(Z, Gaussian, mu, np.identity(D),
name='observations')
# Break symmetry with random initialization of the means
mu.initialize_from_random()
# Inference engine
Q = VB(Y, Z, mu, alpha, autosave_filename=Q.autosave_filename)
Q.load(mu)
# Because using mini-batches, messages need to be multiplied appropriately
print("Stochastic variational inference...")
Q.ignore_bound_checks = True
maxiter *= int(N/N_batch)
delay = 1
forgetting_rate = 0.7
for n in range(maxiter):
# Observe a mini-batch
subset = np.random.choice(N, N_batch)
Y.observe(data[subset,:])
# Learn intermediate variables
Q.update(Z)
# Set step length
step = (n + delay) ** (-forgetting_rate)
# Stochastic gradient for the global variables
Q.gradient_step(mu, alpha, scale=step)
if np.sum(Q.cputime[:n]) > max_cputime:
break
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')
bpplt.pyplot.xlabel('CPU time (in seconds)')
bpplt.pyplot.ylabel('VB lower bound')
bpplt.pyplot.legend(['VB-EM', 'Stochastic inference'], loc='lower right')
bpplt.pyplot.title('VB for Gaussian mixture model')
return
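# --- Editor's illustrative sketch (not part of the original demo) ---
# The stochastic gradient updates above use a Robbins-Monro style step size
# step = (n + delay) ** (-forgetting_rate). The helper below (hypothetical
# name `svi_step_size`) only makes that schedule explicit; delay >= 0 and
# 0.5 < forgetting_rate <= 1 keep the schedule valid for convergence.
def svi_step_size(iteration, delay=1, forgetting_rate=0.7):
    """Return the step length used for the global variables at `iteration`."""
    return (iteration + delay) ** (-forgetting_rate)
# Example: svi_step_size(0) == 1.0, and the steps decay towards zero afterwards.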
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["n=",
"batch=",
"seed=",
"maxiter="])
except getopt.GetoptError:
print('python stochastic_inference.py <options>')
print('--n=<INT> Number of data points')
print('--batch=<INT> Mini-batch size')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--batch",):
kwargs["N_batch"] = int(arg)
run(**kwargs)
plt.show()
| mit |
alexsavio/scikit-learn | sklearn/feature_extraction/text.py | 13 | 52040 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
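# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal usage of the module-level preprocessing helpers defined above; the
# function name `_example_preprocessing_helpers` is hypothetical.
def _example_preprocessing_helpers():
    # Accented characters are reduced to their ASCII counterparts.
    assert strip_accents_ascii('na\u00efve') == 'naive'
    assert strip_accents_unicode('\u00e9l\u00e8ve') == 'eleve'
    # Tags are replaced by spaces, so only the text content remains.
    assert 'some' in strip_tags('<p>some <b>text</b></p>')
    assert '<' not in strip_tags('<p>some <b>text</b></p>')
    return True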
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or byte, and will be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
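# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal usage of HashingVectorizer as described in its docstring: the
# transformer is stateless, so `transform` can be called without fitting.
# The function name `_example_hashing_vectorizer` is hypothetical.
def _example_hashing_vectorizer():
    docs = ['the cat sat on the mat', 'the dog sat on the log']
    vectorizer = HashingVectorizer(n_features=2 ** 8, norm=None)
    X = vectorizer.transform(docs)
    # X is a sparse matrix with one row per document and 2 ** 8 columns.
    return X.shape  # expected: (2, 256)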
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
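# --- Editor's illustrative sketch (not part of the original module) ---
# `_document_frequency` counts, for each column (term), in how many rows
# (documents) it is non-zero. A tiny check with a hand-built CSR matrix:
def _example_document_frequency():
    counts = sp.csr_matrix([[1, 0, 2],
                            [0, 0, 1],
                            [3, 0, 0]])
    # term 0 appears in 2 documents, term 1 in none, term 2 in 2 documents.
    return _document_frequency(counts)  # expected: array([2, 0, 2])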
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or byte, and will be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = frombuffer_empty(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
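# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal end-to-end use of CountVectorizer; the function name
# `_example_count_vectorizer` is hypothetical.
def _example_count_vectorizer():
    corpus = ['the cat sat', 'the cat sat on the mat']
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # The learned vocabulary maps each term to a column index of X.
    vocab = vectorizer.vocabulary_
    # Each row of X holds the term counts of one document.
    return X.toarray(), sorted(vocab)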
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
    The formula that is used to compute the tf-idf for a term t of a document d
    is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is computed as
    idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``),
    where n is the total number of documents and df(t) is the
    document frequency of t; the document frequency is the number of documents
    that contain the term t. The effect of adding "1" to the idf in the equation
    above is that terms with zero idf, i.e., terms that occur in all documents
    in a training set, will not be entirely ignored.
    (Note that the idf formula above differs from the standard
    textbook notation that defines the idf as
    idf(t) = log [ n / (df(t) + 1) ]).
    If ``smooth_idf=True`` (the default), the constant "1" is added to the
    numerator and denominator of the idf as if an extra document was seen
    containing every term in the collection exactly once, which prevents
    zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
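# --- Editor's illustrative sketch (not part of the original module) ---
# A small numeric check of the smoothed idf formula documented above,
# idf(t) = log((1 + n) / (1 + df(t))) + 1; the function name
# `_example_smoothed_idf` is hypothetical.
def _example_smoothed_idf():
    counts = sp.csr_matrix([[3, 0, 1],
                            [2, 0, 0],
                            [3, 0, 0],
                            [4, 0, 0]])
    transformer = TfidfTransformer(smooth_idf=True, norm=None)
    transformer.fit(counts)
    n = counts.shape[0]
    df = _document_frequency(sp.csc_matrix(counts))
    expected = np.log((1.0 + n) / (1.0 + df)) + 1.0
    # transformer.idf_ should match the hand-computed values.
    return transformer.idf_, expected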
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or byte, and will be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
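# --- Editor's illustrative sketch (not part of the original module) ---
# TfidfVectorizer chains CountVectorizer and TfidfTransformer; the function
# name `_example_tfidf_vectorizer` is hypothetical.
def _example_tfidf_vectorizer():
    corpus = ['the cat sat on the mat', 'the dog ate my homework']
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(corpus)
    # Rows of X are l2-normalized tf-idf vectors (norm='l2' by default).
    return X.shape, vectorizer.get_feature_names()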
| bsd-3-clause |
khkaminska/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
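# --- Editor's illustrative sketch (not part of the original example) ---
# The fitted IsotonicRegression object can also interpolate new inputs; the
# function name `_predict_midpoints` is hypothetical and reuses the `ir`, `n`
# and `np` names defined above in this script.
def _predict_midpoints():
    # Predictions at midpoints between training abscissas stay non-decreasing.
    x_new = np.arange(n - 1) + 0.5
    return ir.predict(x_new)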
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
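# --- Editor's illustrative sketch (not part of the original test module) ---
# The Jaccard index checked above is |intersection| / |union| of the cells
# covered by two biclusters; the helper names `_jaccard_by_hand` and
# `_check_jaccard_by_hand` are hypothetical.
def _jaccard_by_hand(rows_a, cols_a, rows_b, cols_b):
    cells_a = {(i, j) for i in np.where(rows_a)[0] for j in np.where(cols_a)[0]}
    cells_b = {(i, j) for i in np.where(rows_b)[0] for j in np.where(cols_b)[0]}
    return len(cells_a & cells_b) / float(len(cells_a | cells_b))
def _check_jaccard_by_hand():
    a1 = np.array([True, True, False, False])
    a3 = np.array([False, True, True, False])
    # Same value as _jaccard(a1, a1, a3, a3) in test_jaccard above: 1/7.
    return _jaccard_by_hand(a1, a1, a3, a3)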
| bsd-3-clause |
wxgeo/geophar | wxgeometrie/geolib/feuille.py | 1 | 70239 | # -*- coding: utf-8 -*-
##--------------------------------------#######
# Feuille #
##--------------------------------------#######
# WxGeometrie
# Dynamic geometry, graph plotter, and more for French mathematics teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This module essentially contains the Feuille class, which hosts all the geometric objects.
# It acts as the intermediary between the objects and the Panel where the objects are displayed.
from keyword import kwlist
from random import choice
from string import ascii_letters
from math import pi as PI, e as E
from types import MethodType, GeneratorType
from operator import attrgetter
import re
import time
from numpy import array
from sympy import Symbol, Wild, sympify, oo
from ..pylib import is_in, property2, print_error, rstrip_, CompressedList
from ..mathlib.intervalles import Union, Intervalle
from ..mathlib.parsers import VAR, NBR_SIGNE, traduire_formule, \
_convertir_separateur_decimal
from .objet import Objet, contexte, G
from .angles import Secteur_angulaire
from .lignes import Segment
from .fonctions import Fonction
from .points import Point
from .cercles import Arc_generique
from .courbes import Courbe
from .textes import Texte, Texte_generique
##from .labels import Label_generique
from .vecteurs import Vecteur_libre
from .variables import Variable, XMinVar, XMaxVar, YMinVar, YMaxVar, Dpx, Dpy, \
Variable_affichage, Variable_generique, Pixel_unite
from .pseudo_canvas import _pseudocanvas
from .. import param
from .. import mathlib
from ..pylib.securite import keywords_interdits_presents, keywords_interdits
PatternType = type(re.compile(''))
def is_equation(chaine):
"""Teste si une chaîne correspond bien à une équation."""
if chaine.count('=') != 1:
return False
left, right = chaine.split('=')
left = left.strip()
if left.count('(') != left.count(')') or right.count('(') != right.count(')'):
return False
if not left or left[-1] in '-+':
        # Assignment. Ex: "=Point()"
        # Operators `+=` and `-=`.
return False
if re.match('(%s|[.])+$' % VAR, left):
        # Assignment. Ex: `A = Point()`, `A.x = 3`,...
        # Two exceptions however: `x =` and `y =` do correspond to the start of an equation.
return left in ('x', 'y')
return True
#assert geo.Objet is Objet
def parse_equation(chaine):
"""Associe à une équation l'objet géométrique correspondant.
Vérifie que la chaîne est une équation, et retourne une chaîne
correspondant à l'objet géométrique correspondant le cas échéant.
Sinon, retourne l'objet initial.
"""
    #XXX: draft of an equation parser
    # The first step is to check that this really is an equation,
    # and not, for instance, an assignment (e.g. `A=Point()`).
if not is_equation(chaine):
return chaine
left, right = chaine.split('=')
chaine = left + '-(' + right + ')'
chaine = traduire_formule(chaine, fonctions=list(mathlib.universal_functions.__dict__.keys()))
try:
expr = sympify(chaine).expand()
except Exception:
print('Sympify: ' + chaine)
raise
x = Symbol('x')
y = Symbol('y')
a = Wild('a',exclude=[x, y])
b = Wild('b',exclude=[x, y])
c = Wild('c',exclude=[x, y])
d = Wild('d',exclude=[x, y])
e = Wild('e',exclude=[y])
f = Wild('f',exclude=[y])
droite = a*x + b*y + c
    # circle: a((x - b)^2 + (y - c)^2 - d) = 0
cercle = a*x**2 + a*y**2 - 2*a*b*x - 2*a*c*y + a*b**2 + a*c**2 - a*d
m = expr.match(droite)
if m:
return "_ = Droite_equation(%s, %s, %s)" %(m[a], m[b], m[c])
m = expr.match(cercle)
if m and m[d].is_positive:
b = m[b]
c = m[c]
d = m[d]
return "_ = Cercle_equation(%s, %s, %s)" %(-2*b, -2*c, b**2 + c**2 - d)
fonction = f*y - e
m = expr.match(fonction)
if m:
return "_ = Courbe(Fonction(%s))" % repr(str(m[e]/m[f]))
return chaine
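# --- Editor's illustrative sketch (not part of the original module) ---
# How the Wild-pattern matching used in parse_equation() recognizes a line
# equation; the function name `_example_wild_match` is hypothetical.
def _example_wild_match():
    x = Symbol('x')
    y = Symbol('y')
    a = Wild('a', exclude=[x, y])
    b = Wild('b', exclude=[x, y])
    c = Wild('c', exclude=[x, y])
    # "2x + 3y - 6 = 0" expands to 2*x + 3*y - 6, which matches a*x + b*y + c.
    m = (2*x + 3*y - 6).match(a*x + b*y + c)
    return m[a], m[b], m[c]  # expected: (2, 3, -6)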
class MethodesObjets(object):
def __init__(self, nom_de_la_methode, *objets):
self.methode = nom_de_la_methode
self.objets = objets
def __call__(self, *args, **kw):
for objet in self.objets:
getattr(objet, self.methode)(*args, **kw)
class Liste_objets(object):
def __init__(self, feuille, classe):
self.__dict__['feuille'] = feuille
self.__dict__['classe'] = classe
def __iter__(self):
classe = self.classe
return (obj for obj in self.feuille.liste_objets() if isinstance(obj, classe))
def __setattr__(self, nom, valeur):
for obj in self:
setattr(obj, nom, valeur)
def __delattr__(self, nom):
for obj in self:
delattr(obj, nom)
def __getattr__(self, nom):
if hasattr(self.classe, nom) and isinstance(getattr(self.classe, nom), MethodType):
return MethodesObjets(nom, *self)
return [getattr(obj, nom) for obj in self]
def __str__(self):
return self.classe.__name__.split("_")[0] + 's: ' + ', '.join(obj.nom for obj in self)
__repr__ = __str__
class ModeTolerant(object):
    '''Error-tolerant execution mode.
    This is mainly used to load a file from an older version of WxGéométrie.'''
def __init__(self, feuille, mode = True):
self.feuille = feuille
self.mode = mode
def __enter__(self):
object.__setattr__(self.feuille.objets, '_Dictionnaire_objets__renommer_au_besoin', self.mode)
def __exit__(self, type, value, traceback):
object.__setattr__(self.feuille.objets, '_Dictionnaire_objets__renommer_au_besoin', False)
self.feuille.objets._Dictionnaire_objets__tmp_dict.clear()
class Dictionnaire_objets(dict):
"""Cette classe est un conteneur pour les objets de la feuille, qui sont tous ses attributs,
sauf ceux précédés de __ (attributs reserves pour tous les objets de la classe).
Elle contient aussi tous les objets geometriques.
Certaines methodes standard (comme __setattr__) sont aussi interceptées ou redefinies.
Attributs spéciaux:
`_` fait référence au dernier objet enregistré dans la feuille.
`_noms_restreints` est un dictionnaire contenant une liste de noms ou de patterns
qui ne peuvent être associés qu'à certains types d'objets.
`_noms_interdits` est une liste de noms correspondant à des objets en lecture
seule.
`_suppression_impossible` est une liste de noms d'objets qui ne peuvent pas
être supprimés.
Nota: lorsqu'une clef du dictionnaire est supprimée, la méthode `.supprimer()` de
l'objet est appelée ; les objets qui n'ont pas de méthode `.supprimer()` sont donc
de fait protégés (c'est le cas de `pi`, `e`, ...), sans qu'il soit nécessaire de
les inscrire dans `_noms_interdits`.
"""
__slots__ = ('feuille', '__timestamp', '__renommer_au_besoin', '__tmp_dict')
_noms_restreints = {re.compile('f[0-9]+(_prime)*$'): Fonction, 'xmin': XMinVar,
'xmax': XMaxVar, 'ymin': YMinVar, 'ymax': YMaxVar, 'dpx': Dpx,
'dpy': Dpy, re.compile('Cf[0-9]+$'): Courbe}
    # `kwlist`: reserved Python keywords (if, else, for, etc.)
_noms_interdits = kwlist + ['vue', 't', 'x', 'y', 'z']
_suppression_impossible = ['xmin', 'xmax', 'ymin', 'ymax', 'dpx', 'dpy']
def __init__(self, feuille):
object.__setattr__(self, 'feuille', feuille)
object.__setattr__(self, '_Dictionnaire_objets__timestamp', 0)
object.__setattr__(self, '_Dictionnaire_objets__renommer_au_besoin', False)
object.__setattr__(self, '_Dictionnaire_objets__tmp_dict', {})
self['xmin'] = XMinVar()
self['xmax'] = XMaxVar()
self['ymin'] = YMinVar()
self['ymax'] = YMaxVar()
self['dpx'] = Dpx()
self['dpy'] = Dpy()
self.clear()
def clear(self):
"""Réinitialise le dictionnaire des objets.
Ne pas utiliser directement, mais utiliser plutôt `Feuille.effacer()`
qui rafraichit correctement l'affichage."""
_tmp = {}
for nom in self._suppression_impossible:
if dict.__contains__(self, nom):
_tmp[nom] = dict.__getitem__(self, nom)
dict.clear(self)
dict.update(self, **_tmp)
        # Add the geometric objects, and only them, to the current dictionary
        # (not every class of geolib!)
self.update((key, val) for key, val in G.__dict__.items() \
if isinstance(val, type) and issubclass(val, Objet))
        # Class names can also be typed in lowercase (it is faster to type)
self.update((key.lower(), val) for key, val in G.__dict__.items() \
if isinstance(val, type) and issubclass(val, Objet))
        # Add the usual mathematical functions to the dictionary
self.update((key, val) for key, val in mathlib.universal_functions.__dict__.items() \
if key[0] != "_" and key != "division")
self.update(pi = PI, e = E, oo = oo, \
Intervalle = Intervalle, Union = Union, \
x = Symbol("x"), y = Symbol("y"), z = Symbol("z"), \
t = Symbol("t"), range=range)
self.update(pause = self.feuille.pause, erreur = self.feuille.erreur,
effacer = self.feuille.effacer,
coder = self.feuille.coder,
effacer_codage = self.feuille.effacer_codage,
nettoyer = self.feuille.nettoyer,
supprimer = self.supprimer,
)
dict.__setitem__(self, 'None', None)
dict.__setitem__(self, 'True', True)
dict.__setitem__(self, 'False', False)
# NB: 'True = True' et 'False = False' : non compatible Py3k
types = {'points': 'Point_generique', 'droites': 'Droite_generique', 'polygones': 'Polygone_generique',
'segments': 'Segment', 'cercles': 'Cercle_generique', 'arcs': 'Arc_generique',
'textes': 'Texte_generique', 'vecteurs': 'Vecteur_generique', 'variables': 'Variable'}
d = {}
for typ in types:
d[typ] = Liste_objets(self.feuille, getattr(G, types[typ]))
self.update(d)
def add(self, valeur, nom_suggere=''):
"""Ajoute l'objet `valeur` à la feuille.
Si `nom_suggere` est donné, et que le nom suggéré n'est pas déjà
utilisé, l'objet est référencé sous ce nom.
Sinon, un nom lui est automatiquement attribué.
"""
if nom_suggere and nom_suggere in self:
nom_suggere = ''
self[nom_suggere] = valeur
def _dereferencer(self, objet):
"Commande de bas niveau. Ne pas utiliser directement !"
if objet._nom:
self.pop(objet._nom)
# Important: pour que l'objet soit bien considéré non référencé
# il faut qu'il n'ait pas de nom (on ne peut pas référencer 2 fois un objet).
objet._nom = ""
def __setitem__(self, nom, valeur):
"""Crée un objet de la feuille nommé `nom`, et ayant pour valeur `valeur`.
Remarque: les syntaxes `objets['a'] = 3` et `objets.a = 3` sont équivalentes.
::note::
Les objets d'une feuille (contrairement aux objets Python par exemple)
ne peuvent pas être redéfinis librement...
En général, quand on essaie d'attribuer un nom qui existe déjà,
ce n'est pas volontaire. Pour éviter les erreurs, on impose de
détruire explicitement l'objet avant que le nom puisse être redonné.
Certains noms sont également réservés, et ne peuvent pas être
attribués ainsi. Si l'on veut outrepasser la protection, il faut
utiliser la méthode `.update()` pour les attribuer (à bon escient !).
Enfin, si l'on essaie d'enregistrer dans la feuille un objet qui y est
déjà présent, l'objet est simplement renommé, au lieu d'être enregistré
sous deux noms différents.
"""
feuille = self.feuille
# Paramètres du repère -> géré directement par la feuille
if nom in feuille._parametres_repere:
return setattr(feuille, nom, valeur)
# Ne pas oublier le 'return' !
nom = self.__convertir_nom(nom) or '_'
# Pour certains types d'objets (points libres, textes, variables...),
# le nom peut être déja attribué.
# Par exemple, A=Point(1,2) est valide même si A existe déjà.
# L'objet A ne sera pas écrasé, mais actualisé.
# Dans cet exemple, les coordonnées de A sont actualisées en (1,2)
# (Autrement dit, A=(1,2) devient un alias de A(1,2) ou A.coordonnees = 1,2).
# Bien sûr, il faut en particulier que la valeur soit un objet de meme type.
# (A = Variable(3) n'est pas valide si A est un point !)
if nom in self:
try:
# Ne PAS rattacher la feuille à l'objet tant qu'il n'est pas enregistré
# sur la feuille. En particulier, pour un polygone, rattacher la feuille
# provoque déjà l'enregistrement des sommets sur la feuille, alors que rien
                # ne garantit que le polygone lui-même soit enregistré avec succès.
# Le paramètre `raw=True` indique que pour une variable, il faut retourner
# l'objet `Variable` lui-même, et non sa valeur comme habituellement.
self.__getitem(nom, raw=True)._update(valeur)
# On quitte, car le nom fait toujours référence au même objet, qui est
# déjà enregistré sur la feuille.
return
except Exception:
if param.debug:
print_error()
if self.__renommer_au_besoin:
new = feuille.nom_aleatoire(valeur, prefixe=nom)
if param.debug:
print("Warning: '%s' renommé en '%s'." %(nom, new))
nom = self.__tmp_dict[nom] = new
else:
self.erreur("Ce nom est déjà utilisé : " + nom, NameError)
if not isinstance(valeur, Objet):
# On tente de convertir valeur en un objet Geolib.
# La conversion des générateurs permet de construire des points
# à la volée : '=((i,sqrt(i)) for i in (3,4,5,6))'
if isinstance(valeur, GeneratorType) and nom == "_":
for item in valeur:
self.__setitem__('', item)
return
# 'A = Point' est un alias de 'A = Point()'
elif isinstance(valeur, type) and issubclass(valeur, Objet):
valeur = valeur()
            # Par commodité, certains types sont automatiquement convertis :
# - Variable
elif isinstance(valeur, (int, float, str)): # u=3 cree une variable
valeur = Variable(valeur)
# - Point
elif isinstance(valeur, complex):
valeur = Point(valeur.real, valeur.imag)
elif hasattr(valeur, "__iter__"):
valeur = tuple(valeur)
# - Texte
if len(valeur) in (1, 3) and isinstance(tuple(valeur)[0], str):
# t=["Bonjour!"] cree un texte
# t=('Bonjour!', 2, 3) également
valeur = Texte(*valeur)
elif len(valeur) == 2:
# - Vecteur_libre
if nom.islower():
# u=(1,2) crée un vecteur
valeur = Vecteur_libre(*valeur)
# - Point
else:
# A=(1,2) cree un point.
valeur = Point(*valeur)
else:
# On vérifie que l'objet n'est pas déjà enregistré dans la feuille
# sous un autre nom. Si c'est le cas, on le renomme (pour ne pas
# l'enregistrer deux fois dans la feuille sous deux noms différents).
if valeur._nom:
# L'objet est déjà référencé dans une feuille ssi il a un nom.
assert (valeur.feuille is feuille), \
"L'objet %s (%s) est deja enregistre dans une autre feuille (%s)." \
% (valeur.nom, valeur, valeur.feuille)
valeur.renommer(nom)
return
# On n'a pas réussi à convertir `valeur` en un objet Geolib.
if not isinstance(valeur, Objet):
self.erreur("type d'objet incorrect :(%s,%s)"%(nom, valeur), TypeError)
self.__verifier_syntaxe_nom(valeur, nom)
valeur.feuille = feuille
if nom == "_":
# Attention, la feuille doit être déjà definie !
nom = valeur._nom_alea()
# Pour les objets nommés automatiquement, le nom n'est pas affiché par défaut.
if valeur.mode_affichage == 'nom':
valeur.label(mode='rien')
# Les objets dont le nom commence par "_" ne sont pas affichés par défaut (pure convention) :
if nom[0] == "_":
valeur.style(visible = False)
# Enregistrement de l'objet dans la feuille.
dict.__setitem__(self, nom, valeur)
valeur._nom = nom
valeur.on_register()
# L'attribut `_timestamp` permet de classer les objets par date de création.
# (Il sert essentiellement à pouvoir supprimer le dernier objet créé.)
valeur._timestamp = self.__timestamp
object.__setattr__(self, "_Dictionnaire_objets__timestamp", self.__timestamp + 1)
# Indiquer que la feuille doit être rafraichie (l'affichage notamment).
self.feuille._actualiser_liste_objets = True
self.feuille.affichage_perime()
def __getitem(self, nom, raw=False):
"""Usage interne: code commun aux méthodes `.__getitem__()` et `.get()`.
Le paramètre `raw=True` permet de récupérer les variables elles-mêmes
(et non leur contenu).
"""
# renommage temporaire :
nom = self.__tmp_dict.get(nom, nom)
# (utilisé en cas de chargement d'un fichier ancien lors d'un conflit de nom).
if nom in self.feuille._parametres_repere:
return getattr(self.feuille, nom)
elif nom == "objets":
return self()
elif nom == "noms":
return self.noms
elif nom == "_":
return self.__derniere_valeur()
value = dict.__getitem__(self, self.__convertir_nom(nom))
if isinstance(value, Variable_generique) and not raw:
return value.val
return value
def get_raw_item(self, nom):
"Permet de récupérer les variables elles-mêmes (et non leur contenu)."
return self.__getitem(nom, raw=True)
def __getitem__(self, nom):
try:
return self.__getitem(nom)
except KeyError:
if nom in ('_ipython_canary_method_should_not_exist_',
'bogu5_123_aTTri8ute', '__dict__'):
# Tests effectués par PyShell ou Ipython, ne pas afficher de message.
raise
assert 'erreur' in self
self.erreur("Objet introuvable sur la feuille : " + nom, KeyError)
def get(self, nom, defaut=None):
try:
return self.__getitem(nom)
except:
return defaut
def __contains__(self, nom):
return dict.__contains__(self, self.__convertir_nom(nom))
def __delitem__(self, nom):
if nom in self.feuille._parametres_repere:
return delattr(self.feuille, nom)
# ne pas oublier le 'return'
elif nom == "_":
self.__derniere_valeur().supprimer()
else:
try:
self[nom].supprimer()
except KeyError:
if param.debug:
print_error()
self.feuille._actualiser_liste_objets = True
self.feuille.affichage_perime()
__setattr__ = __setitem__
__delattr__ = __delitem__
__getattr__ = __getitem__
def lister(self, objets_caches=True, etiquettes=False, **kw):
"""Retourne la liste des objets géométriques.
Le paramètre `objets_caches` indique s'il faut inclure les objets cachés.
Le paramètre `etiquettes` indique s'il faut inclure les étiquettes
des objets retournés.
kw:
* `type` : types à inclure
* `sauf` : types à exclure
note:: Utiliser plutôt `Feuille.liste_objets()`, qui bénéficie d'une mise
en cache des résultats.
"""
sauf = kw.get("sauf", ())
type = kw.get("type", Objet)
objets = []
for objet in self.values():
if isinstance(objet, type) and not isinstance(objet, sauf) \
and (objets_caches or objet._style['visible']):
objets.append(objet)
if etiquettes and objet.etiquette is not None:
objets.append(objet.etiquette)
return objets
##if kw:
##sauf = kw.get("sauf", ())
##type = kw.get("type", Objet)
##objets = [obj for obj in self.values() if isinstance(obj, type) \
##and not isinstance(obj, sauf) and (objets_caches or obj.style("visible"))]
##elif objets_caches:
##objets = [obj for obj in self.values() if isinstance(obj, Objet)]
##else:
##objets = [obj for obj in self.values() if isinstance(obj, Objet) and obj.style("visible")]
def supprimer(self, *objets):
"""Supprime plusieurs objets dans le bon ordre.
Supprime successivement plusieurs objets après les avoir classé
hiérarchiquement. Cela évite d'avoir des erreurs avec certains
objets déjà supprimés avec les précédents du fait des dépendances.
Par exemple, `del feuille.objets.A, feuille.objets.B` renvoie une
erreur si l'objet `B` dépend de l'objet `A`, car l'objet `B`
n'existe déjà plus au moment où on cherche à le supprimer.
Nota: La suppression d'un objet qui n'est pas sur la feuille
provoque bien toujours une erreur, par contre."""
for obj in sorted(objets, key=attrgetter("_hierarchie"), reverse=True):
obj.supprimer()
@property
def noms(self):
u"""Retourne les noms de tous les objets géométriques."""
return set(nom for nom, obj in self.items() if isinstance(obj, Objet))
@staticmethod
def __convertir_nom(nom):
'''Convertit les noms contenant des `, ', ou " en noms python corrects.'''
return nom.replace('`', '_prime').replace('"', '_prime_prime').replace("'", "_prime")
def __match(self, pattern, nom):
if isinstance(pattern, PatternType):
return re.match(pattern, nom)
else:
return nom == pattern
def __verifier_syntaxe_nom(self, objet, nom, **kw):
"Vérifie que le nom est correct (ie. bien formé) et le modifie le cas échéant."
def err(msg):
if kw.get('skip_err'):
return
if self.__renommer_au_besoin:
new = self.feuille.nom_aleatoire(objet)
print("Warning: '%s' renommé en '%s'." %(nom, new))
return new
else:
self.erreur(msg, NameError)
if nom == '':
return '_'
nom = self.__convertir_nom(nom)
if nom in self.__class__.__dict__ \
or any(self.__match(pattern, nom) for pattern in self._noms_interdits):
return err("Nom réservé : " + nom) # Pas d'accent dans le code ici a cause de Pyshell !
# Les noms contenant '__' sont des noms réservés pour un usage futur éventuel (convention).
if "__" in nom:
return err('Un nom ne peut pas contenir "__".')
if not re.match("""[A-Za-z_][A-Za-z0-9_'"`]*$""", nom):
return err("'%s' n'est pas un nom d'objet valide." %nom)
# Certains noms sont réservés à des usages spécifiques.
# Par ex., les noms f1, f2... sont réservés aux fonctions (cf. module Traceur).
for pattern, types in self._noms_restreints.items():
if self.__match(pattern, nom):
if isinstance(objet, types):
break
return err("Le nom %s est réservé à certains types d'objets." % nom)
# Gestion des ' (qui servent pour les dérivées)
if nom.endswith('_prime'):
if isinstance(objet, Fonction):
return err('Nom interdit : %s est réservé pour la dérivée.' % nom)
else:
base = rstrip_(nom, '_prime')
if isinstance(self.get(base, None), Fonction):
return err('Nom interdit : %s désigne déjà la dérivée de %s.' % (nom, base))
elif isinstance(objet, Fonction):
# Si la fonction doit s'appeller f, on vérifie que f', f'', f''', etc. ne correspondent pas déjà à des objets.
for existant in self:
if existant.startswith(nom) and rstrip_(existant, '_prime') == nom:
return err('Ambiguité : un objet %s existe déjà.' % existant)
return nom
def _objet_renommable(self, objet, nom):
"Vérifie que le nom peut-être attribué (c-à-d. qu'il est bien formé, et non utilisé)."
nom = self.__verifier_syntaxe_nom(objet, nom)
if nom in self:
self.erreur("Ce nom est déjà utilisé.", NameError)
return nom
def __str__(self):
return "Gestionnaire d'objets de la feuille '" + self.feuille.nom \
+ "': " + str(self.noms)
def __repr__(self):
return "Gestionnaire d'objets de la feuille '" + self.feuille.nom \
+ "': " + repr(self.noms)
def __derniere_valeur(self):
"Dernier objet créé."
return max(self.feuille.liste_objets(True), key = lambda obj:obj._timestamp)
class Interprete_feuille(object):
"""Exécute des commandes dans la feuille.
Reformule également les commandes avant de les exécuter."""
def __init__(self, feuille):
self.feuille = feuille
def executer(self, commande, parser=True, signature=None):
"""Exécute la commande dans la feuille.
Si `parser=False`, les facilités de syntaxe (abréviations, etc.)
sont désactivées pour plus de rapidité.
Si `signature != None`, elle est utilisée pour la gestion de l'historique.
Voir aussi `commande_executee()`.
"""
if commande.startswith('#'):
return 'Commentaire ignoré : %s' % commande
if parser:
commande = self.parser(commande)
if param.debug:
self.feuille.save_log("REQUETE FEUILLE: " + commande)
# À mettre en toute fin, pour des raisons de sécurité.
if keywords_interdits_presents(commande):
self.erreur("Mots-clefs interdits : " + ", ".join(sorted(keywords_interdits)))
try:
code = compile(commande, '<string>', 'eval')
val = eval(code, self.feuille.objets)
if isinstance(val, Variable):
if val._type == "simple":
retour = str(val.val)
else:
retour = '"' + val.contenu + '" : ' + str(val.val)
elif isinstance(val, (list, tuple, set)):
# Améliore la lisibilité de l'affichage pour une liste d'objets
# en affichant le nom des objets au lieu des objets eux-mêmes
# (pour ceux qui ont un nom).
if isinstance(val, list):
retour = '['
elif isinstance(val, set):
retour = 'set(['
else:
retour = '('
for elt in val:
if isinstance(elt, Objet):
nom = elt.nom
retour += (nom if nom else str(elt))
else:
retour += repr(elt)
retour += ', '
retour = retour.rstrip(', ')
if isinstance(val, list):
retour += ']'
elif isinstance(val, set):
retour += '])'
else:
retour += ')'
else:
retour = str(val)
except SyntaxError:
exec(commande + '\n', self.feuille.objets)
# Le + '\n' final contourne un bug de Python 2.5 avec with_statement
retour = 'Commande exécutée.'
finally:
self.commande_executee(signature = signature)
return retour
def commande_executee(self, signature = None):
"""Méthode appelée automatiquement après avoir exécuté une commande dans la feuille.
Si l'on n'a pas utilisé la méthode executer(), il faut alors l'appeler manuellement."""
self.feuille.historique.archiver(signature = signature)
# TODO: A déplacer dans la console graphique d'exécution ?
# Redétection des objets à proximité du pointeur
self.feuille.canvas.redetecter = True
if self.feuille.classeur is not None and self.feuille.classeur.parent is not None:
self.feuille.classeur.parent.rafraichir_titre()
for action in self.feuille._actions:
action()
@staticmethod
def parser(commande):
"""Convertit la commande en code Python.
>>> from wxgeometrie.geolib.feuille import Interprete_feuille
>>> Interprete_feuille.parser("[A B]")
'Segment(A, B)'
>>> Interprete_feuille.parser("(A B)")
'Droite(A, B)'
"""
commande = commande.strip()
while ' ' in commande:
commande = commande.replace(' ', ' ')
if commande.startswith("="):
commande = "_" + commande
if commande == "del":
commande += " _"
# Gestion des '
# NB: attention, \' a déjà un sens en LaTeX
commande = commande.replace("'", "_prime").replace("\\_prime", "\\'")
# Exception à la conversion décimale :
# (1,2) est compris comme (1, 2) et non (1.2), qui est très peu probable.
# Par contre, f(1,5) ne doit pas être converti en f(1, 5), mais en f(1.5) !
def _virg(m):
return m.group().replace(',', ', ')
commande = re.sub(r'(?<!\w)[(]%s,%s[)]' % (NBR_SIGNE, NBR_SIGNE),
_virg, commande)
# Conversion décimale : 1,2 -> 1.2
commande = _convertir_separateur_decimal(commande)
# (A B) -> Droite(A, B)
def _dte(m):
return "Droite(%s, %s)" % m.groups()
commande = re.sub(r"\([ ]?(%s)[ ](%s)[ ]?\)" % (VAR, VAR), _dte, commande)
# [A B] -> Segment(A, B)
def _seg(m):
return "Segment(%s, %s)" % m.groups()
commande = re.sub(r"\[[ ]?(%s)[ ](%s)[ ]?\]" % (VAR, VAR), _seg, commande)
# ||u|| -> u.norme
def _normu(m):
return "%s.norme" % m.groups()
commande = re.sub(r"\|\|[ ]?(%s)[ ]?\|\|" % VAR, _normu, commande)
# ||A>B|| ou ||A->B|| -> (A->B).norme
def _normAB(m):
return "(%s->%s).norme" % m.groups()
commande = re.sub(r"\|\|[ ]*(%s)[ ]*-?>[ ]*(%s)[ ]*\|\|" % (VAR, VAR), _normAB, commande)
# ||A>B|| ou ||A->B|| -> (A->B).norme
def _vecAB(m):
return "Vecteur(%s, %s)" % m.groups()
commande = re.sub(r"(%s)[ ]*->[ ]*(%s)" % (VAR, VAR), _vecAB, commande)
# 1,2 ou 1;2 ou 1 2 ou (1,2) ou (1;2) ou (1 2) *uniquement* -> Point(1,2)
m = re.match("(\()?(?P<x>%s)[ ]?[;, ][ ]?(?P<y>%s)(?(1)\))$" % (NBR_SIGNE, NBR_SIGNE), commande)
if m:
commande = "Point(%(x)s,%(y)s)" % m.groupdict()
# `Bonjour !` -> Texte("Bonjour !")
# NB: attention, \` a déjà un sens en LaTeX
def _txt(m):
return "Texte(\"%s\")" % m.groups()[0]
commande = re.sub(r"(?<!\\)`(([^`]|\\`)*[^`\\]|)`", _txt, commande)
# Détection des équations
if '=' in commande:
commande = parse_equation(commande)
return commande
class Historique_feuille(object):
"""Historique de la feuille.
Permet d'enregistrer l'état de la feuille à un instant donné,
et de le restaurer ensuite."""
def __init__(self, feuille):
self.feuille = feuille
# taille maximale
self.n = param.nbr_annulations
self.etats = CompressedList()
self.archiver()
# pour comparer rapidement
self.last_hash = None
# à placer après self.archiver() !
self.feuille.vierge = True
def archiver(self, signature=None):
"""Sauvegarde l'état actuel de la feuille.
Notes concernant l'implémentation::
* si l'état de la feuille n'a pas changé depuis la dernière sauvegarde,
la nouvelle demande de sauvegarde n'est pas prise en compte.
* si `signature` est différente de `None`, une demande effectuée
avec la même signature que la précédente écrase la précédente.
Ceci sert essentiellement pour les zooms avec la molette de la souris,
afin d'éviter de saturer l'historique.
"""
sauvegarde = self.feuille.sauvegarder()
# On évite de stocker deux fois de suite la même chose dans l'historique.
if self.etats and hash(sauvegarde) == self.last_hash and sauvegarde == self.etats[-1]:
return
# Avec la molette de la souris, on effectue une succession rapide de zooms.
# Pour éviter que ça ne remplisse l'historique, on archive alors l'état actuel
# à la place du précedent. Ceci s'effectue grâce à la signature.
# De manière générale, si signature != None, lorsque deux demandes d'archivages
# successives parviennent avec la même signature, la seconde écrase la première.
if signature is not None and self._derniere_signature == signature:
self.etats[-1] = sauvegarde
else:
self.etats.append(sauvegarde)
self.etats_annules = CompressedList()
if len(self.etats) > self.n:
self.etats.pop(0) # plus rapide que "self.etats = self.etats[-self.n:]"
self._derniere_signature = signature
        self.last_hash = hash(sauvegarde)  # doit correspondre au hash testé plus haut
self.feuille.vierge = False
self.feuille.modifiee = True
def annuler(self):
if len(self.etats) > 1:
etat_actuel = self.etats.pop()
self.etats_annules.append(etat_actuel)
if len(self.etats_annules) > self.n:
self.etats_annules.pop(0) # plus rapide que "self.etats_annules = self.etats_annules[-self.n:]"
self.restaurer(self.etats[-1])
self.feuille.message("Action annulée.")
self.feuille.modifiee = True
else:
self.feuille.message("Impossible d'annuler.")
def refaire(self):
if self.etats_annules:
etat = self.etats_annules.pop()
self.etats.append(etat)
if len(self.etats) > self.n:
self.etats.pop(0) # plus rapide que "self.etats = self.etats[-self.n:]"
self.restaurer(etat)
self.feuille.message("Action restaurée.")
self.feuille.modifiee = True
else:
self.feuille.message("Impossible de restaurer.")
def restaurer(self, txt):
self.feuille.effacer()
self.feuille.charger(txt, archiver = False)
class Feuille(object):
"""Feuille de travail.
L'objet 'log' doit être une liste destinée à contenir tous les messages.
"""
# Pour limiter les erreurs, on indique le(s) type(s) autorisé
# pour chaque paramètre.
_parametres_repere = {"quadrillages": tuple,
"afficher_quadrillage": bool,
"afficher_axes": bool,
"afficher_fleches": bool,
"repere": tuple,
"gradu": tuple,
"utiliser_repere": bool,
"liste_axes": tuple,
"ratio": (int, float, type(None)),
"fenetre": tuple,
"zoom_texte": (int, float),
"zoom_ligne": (int, float),
"afficher_objets_caches": bool,
#~ "dpi_ecran": (int, float),
#~ "dimensions_en_pixels": tuple,
}
def __hash__(self):
return id(self)
def __init__(self, classeur = None, titre = "", log = None, parametres = None, canvas = None):
self.log = log
self.classeur = classeur
self.__canvas = canvas
## self._fenetre = self.param("fenetre")
# Gestion des paramètres graphiques (repère essentiellement)
self.__dict_repere = {}
if parametres is None:
parametres = {}
for nom in self._parametres_repere:
self.__dict_repere[nom] = parametres.get(nom, self.parametres_par_defaut(nom))
self.macros = {}
self._cache_listes_objets = {}
self._actualiser_liste_objets = True
## self._mettre_a_jour_figures = True
self._affichage_a_actualiser = True
self._repere_modifie = True
self._objets_temporaires = []
self.__point_temporaire__ = None
        # Permet une optimisation de l'affichage en cas d'objet déplacé
self._objet_deplace = None
# On met ._stop à True pour stopper toutes les animations en cours.
self._stop = False
## self._afficher_objets_caches = False
## # Indique que l'arrière-plan doit être redessiné
## self._repere_modifie = True
# Parametres permettant de gerer l'enregistrement:
self.sauvegarde = {
"_modifie": True, # modifications depuis dernière sauvegarde
"repertoire": None, # répertoire de sauvegarde
"nom": None, # nom de sauvegarde
"export": None, # nom complet utilisé pour l'export
}
# (À créer *avant* l'historique de la feuille)
self.objets = Dictionnaire_objets(self)
self.historique = Historique_feuille(self)
self.interprete = Interprete_feuille(self)
# Informations sur le document
self._infos = {
"titre": titre,
"auteur": param.utilisateur,
"creation": time.strftime("%d/%m/%Y - %H:%M:%S",time.localtime()),
"modification": time.strftime("%d/%m/%Y - %H:%M:%S",time.localtime()),
"version": "",
"resume": "",
"notes": "",
"dimensions-pixels": repr(param.dimensions_en_pixels),
"dpi": repr(param.dpi_ecran),
}
# Resolution et dimensions ne sont pas intégrés aux paramètres,
# car ils ne sont pas gérés par la feuille, mais servent uniquement
# à enregistrer les dimensions et la résolution pour que le fichier .geo
# puisse être utilisé sans interface graphique.
# Dpi et dimensions en pixels dépendent du système (moniteur et taille de la fenêtre).
# En particulier, lorsque l'on charge un fichier, il ne faut pas
# que l'ancien dpi soit utilisé.
# Actions à effectuer après qu'une commande ait été exécutée.
self._actions = []
# Objet.__feuille__ = self # les objets sont crees dans cette feuille par defaut
# ---------------------------------------
# Gestion des paramètres du repère
# ---------------------------------------
def lier(self, action):
if not is_in(action, self._actions):
self._actions.append(action)
def affichage_perime(self):
"""Indique que l'affichage doit être actualisé.
Très rapide (inutile d'optimiser les appels), car aucune actualisation
n'a lieu, mais la feuille est juste marquée comme étant à actualiser."""
# NB: Utiliser une méthode au lieu d'un attribut permet de générer
# une erreur en cas de faute de frappe.
self._affichage_a_actualiser = True
@property2
def modifiee(self, val = None):
if val is None:
return self.sauvegarde['_modifie']
self.sauvegarde['_modifie'] = val
if val and self.classeur is not None:
self.classeur.modifie = True
def infos(self, _key_ = None, **kw):
if kw:
self._infos.update(kw)
self.modifiee = True
elif _key_ is None:
return self._infos.copy()
else:
return self._infos[_key_]
def _rafraichir_figures(self, tous_les_objets = False):
"""Recrée les figures des objets sensibles à la fenêtre d'affichage.
        Si tous_les_objets = True, les figures des objets non sensibles sont
        aussi recréées.
Par ailleurs, le rafraichissement n'a pas lieu immédiatement, mais
les figures sont simplement marquées comme périmées, et seront
rafraichies à la prochaine utilisation.
En principe, cette méthode n'a pas à être appelée directement.
"""
for objet in self.liste_objets():
# Il faut recalculer la position de toutes les étiquettes
# quand la fenêtre d'affichage change.
if objet._affichage_depend_de_la_fenetre:
objet.figure_perimee()
elif objet.etiquette:
# Même si tous les objets n'ont pas besoin d'être rafraichis,
# leurs étiquettes doivent l'être
objet.etiquette.figure_perimee()
def _gerer_parametres_repere(self, item = None, **kw):
if kw:
self.__dict_repere.update(kw)
self._repere_modifie = True
if 'fenetre' in kw or 'ratio' in kw:
self.fenetre_modifiee()
## self._mettre_a_jour_figures = True
if 'afficher_objets_caches' in kw:
for objet in self.liste_objets(True):
if not objet.style('visible'):
objet.figure_perimee()
self._actualiser_liste_objets = True
self.affichage_perime()
if item is not None:
return self.__dict_repere[item]
## @property2
## def afficher_objets_caches(self, valeur = None):
## if valeur is not None:
## self._afficher_objets_caches = valeur
## for objet in self.liste_objets(True):
## if not objet.style('visible'):
## objet.creer_figure()
## self._actualiser_liste_objets = True
## return self._afficher_objets_caches
def __getattr__(self, nom):
# Les parametres du repere
if nom in self._parametres_repere:
return self._gerer_parametres_repere(nom)
return object.__getattribute__(self, nom)
def __setattr__(self, nom, valeur):
if nom in self._parametres_repere:
# TODO: améliorer la détection des valeurs incorrectes
assert isinstance(valeur, self._parametres_repere[nom])
# tests personnalisés pour certains paramètres
nom_test = '_test_valeur_' + nom
if hasattr(self, nom_test):
valeur = getattr(self, nom_test)(valeur)
self._gerer_parametres_repere(**{nom: valeur})
else:
object.__setattr__(self, nom, valeur)
def __delattr__(self, nom):
if nom in self._parametres_repere:
self._gerer_parametres_repere(**{nom: self.parametres_par_defaut(nom)})
else:
object.__delattr__(self, nom)
def _test_valeur_fenetre(self, valeur):
xmin, xmax, ymin, ymax = valeur
xmin = xmin if xmin is not None else self.fenetre[0]
xmax = xmax if xmax is not None else self.fenetre[1]
ymin = ymin if ymin is not None else self.fenetre[2]
ymax = ymax if ymax is not None else self.fenetre[3]
epsilon = 100*contexte['tolerance']
if abs(xmax - xmin) < epsilon:
self.erreur(("Le réglage de la fenêtre est incorrect (xmin=%s et xmax=%s sont trop proches).\n"
"(Les paramètres doivent être dans cet ordre: xmin, xmax, ymin, ymax.)")
% (xmin, xmax), ValueError)
elif abs(ymax - ymin) < epsilon:
self.erreur(("Le réglage de la fenêtre est incorrect (ymin=%s et ymax=%s sont trop proches).\n"
"(Les paramètres doivent être dans cet ordre: xmin, xmax, ymin, ymax.)")
% (ymin, ymax), ValueError)
# Les 'float()' servent à contourner un bug de numpy 1.1.x et numpy 1.2.x (repr de float64)
return float(min(xmin, xmax)), float(max(xmin, xmax)), float(min(ymin, ymax)), float(max(ymin, ymax))
@property2
def xmin(self, value = None):
if value is None:
return self.fenetre[0]
self.fenetre = value, None, None, None
@property2
def xmax(self, value = None):
if value is None:
return self.fenetre[1]
self.fenetre = None, value, None, None
@property2
def ymin(self, value = None):
if value is None:
return self.fenetre[2]
self.fenetre = None, None, value, None
@property2
def ymax(self, value = None):
if value is None:
return self.fenetre[3]
self.fenetre = None, None, None, value
def fenetre_modifiee(self):
for name in ('xmin', 'xmax', 'ymin', 'ymax', 'dpx', 'dpy'):
dict.__getitem__(self.objets, name).perime()
# XXX: il ne devrait pas y avoir besoin d'appeler la méthode suivante :
self._rafraichir_figures()
#########################################################################################
def liste_objets(self, objets_caches=None, tri=False, etiquettes=False):
"""Liste des objets, triés éventuellement selon le style 'niveau'.
NB: un système de mise en cache est utilisé si possible, contrairement à .objets.lister().
"""
if self._actualiser_liste_objets:
for key in self._cache_listes_objets:
self._cache_listes_objets[key] = None
if objets_caches is None:
objets_caches = self.afficher_objets_caches
# 4 caches, correspondants aux 4 situations possibles :
# objets_caches = True, trier = True ;
# objets_caches = True, trier = False ; etc.
clef = 'c' if objets_caches else ''
if tri:
clef += 't'
if etiquettes:
clef += 'e'
objets = self._cache_listes_objets.get(clef)
if objets is None:
liste = self.objets.lister(objets_caches=objets_caches, etiquettes=etiquettes)
if tri:
# Exceptionnellement, on utilise '._style' au lieu de '.style'
# car le gain de temps est significatif.
liste.sort(key=(lambda x:x._style["niveau"]), reverse=True)
objets = self._cache_listes_objets[clef] = liste
return objets
#########################################################################################
def _dimensions_en_pixels(self):
"Dimensions en pixels de la feuille affichée par le canevas."
if self.canvas is not _pseudocanvas:
return self.canvas.dimensions
else:
return eval(self._infos['dimensions-pixels'])
def fenetre_reellement_affichee(self):
"""Fenêtre réellement affichée à l'écran.
Dans le cas où le ratio abscisse/ordonnée est fixe (par exemple,
si l'utilisateur impose un repère orthonormé), les valeurs
de xmin, xmax, ymin et ymax définies par l'utilisateur ne
peuvent plus correspondre en général à la fenêtre d'affichage réelle
        (qui est élargie dans une des deux dimensions pour respecter cette contrainte).
Ainsi, `Feuille.fenetre` renvoie la fenêtre telle que définie par
l'utilisateur, tandis que `Fenetre.fenetre_reellement_affichee()`
renvoie la fenêtre réellement affichée à l'écran.
"""
fenetre = self.fenetre
rat = self.ratio # x:y -> x/y
# ratio est le rapport "unité en abscisse/unité en ordonnée"
if rat is not None:
w, h = self._dimensions_en_pixels()
coeff0 = rat*(fenetre[1] - fenetre[0])/w
coeff1 = (fenetre[3] - fenetre[2])/h
xmin, xmax, ymin, ymax = fenetre
xcoeff = (coeff1/coeff0 if coeff0 < coeff1 else 1)
ycoeff = (1 if coeff0 < coeff1 else coeff0/coeff1)
x, y, rx, ry = (xmin+xmax)/2., (ymin+ymax)/2., (xmax-xmin)/2., (ymax-ymin)/2.
return x - xcoeff*rx, x + xcoeff*rx, y - ycoeff*ry, y + ycoeff*ry
return fenetre
@property2
def canvas(self, val = None):
if val is None:
if self.__canvas is not None:
return self.__canvas
else:
# canvas = self.classeur.parent.canvas
canvas = getattr(getattr(self.classeur, 'parent', None), 'canvas', None)
if canvas is not None:
self.__canvas = canvas
return canvas
return _pseudocanvas
self.__canvas = val
def coo2pix(self, x, y):
"""Convertit des coordonnées en pixel."""
if isinstance(x, (list, tuple)):
x = array(x)
if isinstance(y, (list, tuple)):
y = array(y)
l, h = self._dimensions_en_pixels()
fenetre = self.fenetre_reellement_affichee()
px = l*(x - fenetre[0])/(fenetre[1] - fenetre[0])
py = h*(fenetre[3] - y)/(fenetre[3] - fenetre[2])
return px, py
def pix2coo(self, px, py):
"""Convertit un pixel en coordonnées."""
if isinstance(px, (list, tuple)):
px = array(px)
if isinstance(py, (list, tuple)):
py = array(py)
l, h = self._dimensions_en_pixels()
fenetre = self.fenetre_reellement_affichee()
x = px*(fenetre[1] - fenetre[0])/l + fenetre[0]
y = py*(fenetre[2] - fenetre[3])/h + fenetre[3]
# print x, y, -x, -y
return x, y
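    # Round-trip sanity check for coo2pix/pix2coo (hedged: the 800x600 canvas and
    # the window (0, 10, 0, 5) with ratio None are made-up values; the numbers
    # follow directly from the formulas above):
    #     coo2pix(5, 2.5)   -> (400.0, 300.0)   window centre -> canvas centre
    #     pix2coo(400, 300) -> (5.0, 2.5)
    #     coo2pix(0, 5)     -> (0.0, 0.0)       top-left corner (pixel y axis points down)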
def dcoo2pix(self, dx, dy):
"""Convertit un déplacement exprimé en coordonnées en un déplacement en pixels."""
l, h = self._dimensions_en_pixels()
fenetre = self.fenetre_reellement_affichee()
dpx = l*dx/(fenetre[1] - fenetre[0])
dpy = h*dy/(fenetre[2] - fenetre[3])
return dpx, dpy
def dpix2coo(self, dpx, dpy):
"""Convertit un déplacement exprimé en pixels en un déplacement exprimé en coordonnées."""
l, h = self._dimensions_en_pixels()
fenetre = self.fenetre_reellement_affichee()
dx = dpx*(fenetre[1] - fenetre[0])/l
dy = dpy*(fenetre[2] - fenetre[3])/h
return dx, dy
# TODO: à réécrire
# les paramètres par défaut de geolib doivent être contenus dans geolib lui-même.
def parametres_par_defaut(self, nom):
if getattr(self.classeur, "parent", None) is not None:
return self.classeur.parent.param(nom)
else:
return getattr(param, nom)
@property
def parametres(self):
return self.__dict_repere.copy()
@property
def nom(self):
"Destiné à être affiché."
nom = self.infos("titre")
if self.sauvegarde["nom"]:
nom += ' - ' + self.sauvegarde["nom"]
return nom or "Feuille"
@property
def nom_complet(self):
"Destiné à être affiché en haut de la fenêtre."
nom = self.modifiee and "* " or ""
liste = self.sauvegarde["nom"], self.infos("titre")
nom += " - ".join(s for s in liste if s)
return nom
def objet(self, nom): # retourne l'objet associe au nom "nom"
return self.objets[nom]
#######################################################################################
# Methodes se rapportant a la feuille elle-meme
def exporter(self, backend_name, **options):
from .backends import backends_dict
return backends_dict[backend_name].exporter(self, **options)
def sauvegarder(self, _as_list=False):
"""Renvoie l'ensemble des commandes python qui permettra de recréer
la figure avec tous ses objets.
La figure pourra ensuite être restaurée à l'aide la commande `charger()`.
Si _as_list=True, renvoie une liste de commandes (par défaut, les
lignes de commandes sont concaténées et une chaîne est renvoyée).
:rtype: string/list
"""
# On sauvegarde les paramètres de la feuille.
commandes = [nom + ' = ' + repr(getattr(self, nom))
for nom in self._parametres_repere]
# On ajoute une ligne vide juste pour la lisibilité.
commandes.append('')
# Enfin, on sauvegarde les objets de la feuille.
# On doit enregistrer les objets dans le bon ordre (suivant la _hierarchie).
objets = sorted(self.liste_objets(objets_caches=True,
etiquettes=True), key=attrgetter("_hierarchie_et_nom"))
commandes += [obj.sauvegarder() for obj in objets
if obj._enregistrer_sur_la_feuille]
if _as_list:
return commandes
return '\n'.join(commandes)
def effacer(self):
self.objets.clear()
self.affichage_perime()
def charger(self, commandes, rafraichir = True, archiver = True,
mode_tolerant = False):
"""Exécute un ensemble de commandes dans la feuille.
Usage:
f = Feuille()
...
commandes = f.sauvegarder()
f.effacer()
f.charger(commandes)
"""
with self.canvas.geler_affichage(actualiser=rafraichir, sablier=rafraichir):
with ModeTolerant(self, mode_tolerant):
try:
exec(commandes, self.objets)
except:
print("Liste des commandes:")
print("--------------------")
print(commandes)
print("--------------------")
try:
print_error()
except:
print("Affichage de l'erreur impossible !")
self.erreur("Chargement incomplet de la feuille.")
finally:
for action in self._actions:
action()
if archiver:
self.historique.archiver()
def executer(self, commande, parser = True):
return self.interprete.executer(commande, parser = parser)
def redefinir(self, objet, valeur):
nom = objet.nom
# on récupère la liste des arguments, et on la formate...
args = valeur.strip()[valeur.find("("):-1] + ",)"
# ...de manière à obtenir un objet 'tuple' en l'évaluant.
args = eval(args, self.objets) # utiliser evalsafe à la place ?
heritiers = objet._heritiers()
heritiers.add(objet)
for arg in args:
if isinstance(arg, Objet):
for heritier in heritiers:
if arg is heritier:
self.erreur("Définition circulaire dans %s : \
l'objet %s se retrouve dépendre de lui-même."
%(valeur, nom))
#raise RuntimeError, "Definition circulaire dans %s : l'objet %s se retrouve dependre de lui-meme." %(valeur, nom)
commandes = self.sauvegarder(_as_list=True)
backup = '\n'.join(commandes)
# Utiliser '.copier_style()' et non '.style()' car le type de l'objet
# a pu changer, auquel cas il ne faut copier que les styles qui ont
# du sens pour le nouveau type d'objet.
valeur += "\n%s.copier_style(%s)" % (nom, repr(objet))
old_save = objet.sauvegarder()
etiquette = objet.etiquette
if etiquette is not None:
s = etiquette.sauvegarder()
commandes.remove(s)
# On déplace la commande concernant l'étiquette en dernière position, dans le doute.
commandes.append(s)
i = commandes.index(old_save)
# On remplace l'ancienne définition de l'objet par la nouvelle
commandes[i] = "\n%s=%s\n" % (nom, valeur)
# Il se peut que la nouvelle définition rajoute des dépendances
# à l'objet et oblige à le définir plus tard dans la feuille.
# L'idée est de repousser la définition de l'objet de plus en
# plus loin dans la feuille jusqu'à ce qu'il n'y ait plus d'erreur.
# Cette technique est loin d'être optimale, mais elle a
# l'avantage d'être simple et robuste.
while i < len(commandes):
try:
if param.debug:
print('\n'.join(commandes))
self.historique.restaurer('\n'.join(commandes))
break
except Exception:
                commandes.insert(i + 1, commandes.pop(i))
i += 1
else:
self.historique.restaurer(backup)
self.erreur("Erreur lors de la redéfinition de %s." %nom)
def inventaire(self):
objets = self.liste_objets(True)
if param.debug:
for obj in objets:
print("- " + obj.nom + " : " + repr(obj) + " (" + obj.type() + ")")
liste = ["%s (%s%s)" % (obj.nom_complet, obj.titre(point_final=False),
('' if obj.style("visible") else " invisible")) for obj in objets
if not isinstance(obj, (Variable_affichage, Pixel_unite))]
liste.sort()
return liste
def nettoyer(self):
"""Supprime les objets cachés inutiles.
Un objet caché est inutile si aucun objet visible ne dépend de lui.
Les textes vides sont également supprimés."""
objets = [obj for obj in self.liste_objets(True) if not obj.visible
or isinstance(obj, Texte_generique) and not obj.texte]
objets.sort(key=attrgetter("_hierarchie"), reverse=True)
for obj in objets:
if obj.nom not in self.objets._suppression_impossible:
if not any(self.contient_objet(heritier) for heritier in obj._heritiers()):
obj.supprimer()
def effacer_codage(self):
"Efface tous les codages sur les segments, angles et arcs de cercles."
for obj in self.liste_objets(True):
if obj.style("codage") is not None:
obj.style(codage = "")
def coder(self):
"Codage automatique de la figure (longueurs égales, angles égaux, et angles droits)."
def test(groupe, liste, i):
if len(groupe) == 1:
groupe[0]["objet"].style(codage = "")
return False
else:
try:
for elt in groupe:
elt["objet"].style(codage = liste[i])
return True
except IndexError:
self.message("Le nombre de codages disponibles est insuffisant.")
print_error("Le nombre de codages disponibles est insuffisant.")
objets = self.objets.lister(False, type = (Segment, Arc_generique))
lignes = [{"longueur": obj._longueur(), "objet": obj} for obj in objets]
if lignes:
            lignes.sort(key=lambda d: d["longueur"]) # tri explicite par longueur (comparer des dictionnaires échoue en Python 3)
groupe = [lignes[0]]
i = 1
for ligne in lignes[1:]:
if abs(groupe[-1]["longueur"] - ligne["longueur"]) < contexte['tolerance']:
groupe.append(ligne)
else:
resultat = test(groupe, param.codage_des_lignes, i)
if resultat is None:
break
if resultat:
i += 1
groupe = [ligne]
test(groupe, param.codage_des_lignes, i)
objets = self.objets.lister(False, type = Secteur_angulaire)
angles = [{"angle": obj.val, "objet": obj} for obj in objets]
if angles:
            angles.sort(key=lambda d: d["angle"]) # tri explicite par angle (comparer des dictionnaires échoue en Python 3)
groupe = [angles[0]]
i = 2
for angle in angles[1:]:
if abs(groupe[-1]["angle"] - angle["angle"]) < contexte['tolerance']:
groupe.append(angle)
else:
# print abs(abs(groupe[-1]["angle"]) - pi/2)
if abs(abs(groupe[-1]["angle"]) - PI/2) < contexte['tolerance']:
for elt in groupe:
elt["objet"].style(codage = "^")
else:
resultat = test(groupe, param.codage_des_angles, i)
if resultat is None:
break
if resultat:
i += 1
groupe = [angle]
if abs(abs(groupe[-1]["angle"]) - PI/2) < contexte['tolerance']:
for elt in groupe:
elt["objet"].style(codage = "^")
else:
test(groupe, param.codage_des_angles, i)
self.affichage_perime()
def objet_temporaire(self, objet = False):
if objet is not False:
if self._objets_temporaires:
self.affichage_perime()
if objet is None:
self._objets_temporaires = []
else:
objet.feuille = self
self._objets_temporaires = [objet]
return self._objets_temporaires
def contient_objet(self, objet):
"""contient_objet(self, objet) -> bool
Teste rapidement si l'objet est répertorié dans la feuille.
(Ne cherche pas parmi les objets temporaires.)"""
return is_in(objet, list(self.objets.values()))
def contient_objet_temporaire(self, objet):
"""contient_objet_temporaire(self, objet) -> bool
Teste rapidement si l'objet est répertorié comme objet temporaire dans la feuille."""
for obj in self._objets_temporaires:
if obj is objet:
return True
return False
def point_temporaire(self):
if self.__point_temporaire__ is None:
self.__point_temporaire__ = Point()
self.__point_temporaire__.feuille = self
return self.__point_temporaire__
def start(self):
"Autorise le lancement d'animations."
self._stop = False
def stop(self):
"Arrête toutes les animations en cours."
self._stop = True
def animer(self, nom, debut = 0, fin = 1, pas = 0.02, periode = 0.03):
"""Anime la variable nommée `nom`.
:param nom: nom de la variable dont on souhaite faire varier la valeur.
:param debut: valeur initiale de la variable.
:param fin: valeur finale de la variable.
:param pas: de combien on incrémente la variable à chaque étape.
:param periode: durée (en secondes) entre 2 incrémentations.
`nom` peut aussi être une expression correspondant à une variable::
>>> from wxgeometrie.geolib import Point, Feuille
>>> f = Feuille()
>>> f.objets.A = A = Point()
>>> f.animer("A.x", 0, 5, .1)
"""
eval(nom, self.objets).varier(debut, fin, pas, periode)
#######################################################################################
def message(self, messg): # A REECRIRE
if contexte['afficher_messages'] and param.verbose:
messg = 'Feuille "%s" - %s' %(self.nom, messg)
self.canvas.message(messg)
print(messg)
if self.log is not None:
self.log.append(messg)
def erreur(self, message, erreur=None):
self.message("Erreur : " + message)
if erreur is None:
erreur = RuntimeError
raise erreur(message)
def save_log(self, log):
# Impérativement utiliser 'is not None' car le log peut être vide.
if self.log is not None:
self.log.append(log)
#######################################################################################
# Gestion de l'affichage
# cf. API/affichage.py
def lister_figures(self):
"""Renvoie deux listes de figures (artistes matplotlib).
La seconde est celle des figures qui vont bouger avec l'objet deplacé ;
et la première, des autres qui vont rester immobiles.
S'il n'y a pas d'objet en cours de déplacement, la deuxième liste est vide.
"""
objet_deplace = self._objet_deplace
##if isinstance(objet_deplace, Label_generique):
##objet_deplace = objet_deplace.parent
# TODO: pouvoir rafraichir uniquement l'étiquette ?
## # Rafraichit les figures s'il y a besoin:
## if self._mettre_a_jour_figures:
## self._rafraichir_figures()
## self._mettre_a_jour_figures = False
# On liste tous les objets qui vont bouger avec 'objet_deplace':
if objet_deplace is None:
heritiers = []
else:
heritiers = objet_deplace._heritiers()
heritiers.add(objet_deplace)
# objets non susceptibles d'être modifiés (sauf changement de fenêtre, etc.)
liste1 = []
# objets susceptibles d'être modifiés
liste2 = []
for objet in self.liste_objets(etiquettes=True):
liste = liste2 if is_in(objet, heritiers) else liste1
liste.extend(objet.figure)
liste.extend(objet._trace)
for objet in self._objets_temporaires:
liste2.extend(objet.figure)
liste2.extend(objet._trace)
if objet.etiquette:
liste2.extend(objet.etiquette.figure)
liste2.extend(objet.etiquette._trace)
return liste1, liste2
def effacer_traces(self):
"Efface toutes les traces (sans enlever le mode trace des objets)."
for objet in self.liste_objets():
objet.effacer_trace()
self.affichage_perime()
def met_objets_en_gras(self, *objets):
"""Met en gras les objets indiqués, et remet les autres objets en état "normal" le cas échéant."""
changements = False
for objet in self.liste_objets(True):
if is_in(objet, objets):
val = objet.en_gras(True)
else:
val = objet.en_gras(False)
if val is not None:
changements = True
if changements:
self.affichage_perime()
#########################################################################################
#########################################################################################
# Gestion du zoom et des coordonnees, reglage de la fenetre d'affichage.
# cf. API/affichage.py
#########################################################################################
# Diverses fonctionnalites de la feuille, utilisees par les objets.
# C'est un peu la boite a outils :-)
def nom_aleatoire(self, objet, prefixe=None):
"""Génère un nom d'objet non encore utilisé.
Si possible, le nom sera de la forme 'prefixe' + chiffres.
Sinon, un préfixe aléatoire est généré."""
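        # Example (hedged; assumes the sheet already contains points M1 and M2, no
        # other M<n> or P<n> names, and that the object's default prefix is 'M'):
        #     nom_aleatoire(point)              -> 'M3'
        #     nom_aleatoire(point, prefixe='P') -> 'P1'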
prefixe = (prefixe if prefixe else objet._prefixe_nom)
existants = self.objets.noms
for i in range(1000):
n = len(prefixe)
numeros = [int(s[n:]) for s in existants if re.match(prefixe + "[0-9]+$", s)]
nom = prefixe + (str(max(numeros) + 1) if numeros else '1')
nom = self.objets._Dictionnaire_objets__verifier_syntaxe_nom(objet, nom, skip_err=True)
if nom is not None:
return nom
prefixe = ''.join(choice(ascii_letters) for i in range(8))
raise RuntimeError("Impossible de trouver un nom convenable apres 1000 essais !")
def pause(self):
Objet.souffler()
if self._stop:
raise RuntimeError("Interruption de la macro.")
| gpl-2.0 |
QJonny/CyNest | pynest/examples/mc_neuron.py | 2 | 5112 | # -*- coding: utf-8 -*-
#
# mc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Simple example of how to use the three-compartment
iaf_cond_alpha_mc model neuron.
Three stimulation paradigms are illustrated:
- externally applied current, one compartment at a time
- spikes impinging on each compartment, one at a time
- rheobase current injected to soma causing output spikes
Voltage and synaptic conductance traces are shown for all compartments.
'''
import nest
import matplotlib
import pylab as pl
nest.ResetKernel()
# Obtain receptor dictionary
syns = nest.GetDefaults('iaf_cond_alpha_mc')['receptor_types']
print "iaf_cond_alpha_mc receptor_types: ", syns
# Obtain list of recordable quantities
rqs = nest.GetDefaults('iaf_cond_alpha_mc')['recordables']
print "iaf_cond_alpha_mc recordables : ", rqs
# Change some default values:
# - threshold potential
# - reset potential
# - refractory period
# - somato-proximal coupling conductance
# - somatic leak conductance
# - proximal synaptic time constants
# - distal capacitance
nest.SetDefaults('iaf_cond_alpha_mc',
{ 'V_th' : -60.0,
'V_reset': -65.0,
't_ref': 10.0,
'g_sp' : 5.0,
'soma' : { 'g_L': 12.0 },
'proximal': { 'tau_syn_ex': 1.0,
'tau_syn_in': 5.0 },
'distal' : { 'C_m': 90.0 }
})
# Create neuron
n = nest.Create('iaf_cond_alpha_mc')
# Create multimeter recording everything, connect
mm = nest.Create('multimeter',
params = {'record_from': rqs,
'interval': 0.1})
nest.Connect(mm, n)
# Create one current generator per compartment and configure
# stimulus regime that drives distal, proximal and soma, in that order
cgs = nest.Create('dc_generator', 3)
nest.SetStatus(cgs,
[{'start': 250.0, 'stop': 300.0, 'amplitude': 50.0}, # soma
{'start': 150.0, 'stop': 200.0, 'amplitude': -50.0}, # proximal
{'start': 50.0, 'stop': 100.0, 'amplitude': 100.0}]) # distal
# Connect generators to correct compartments
nest.Connect([cgs[0]], n, params = {'receptor_type': syns['soma_curr']})
nest.Connect([cgs[1]], n, params = {'receptor_type': syns['proximal_curr']})
nest.Connect([cgs[2]], n, params = {'receptor_type': syns['distal_curr']})
# Create one excitatory and one inhibitory spike generator per compartment,
# configure regime that drives distal, proximal and soma, in that order,
# excitation and inhibition alternating
sgs = nest.Create('spike_generator', 6)
nest.SetStatus(sgs,
[{'spike_times': [600.0, 620.0]}, # soma excitatory
{'spike_times': [610.0, 630.0]}, # soma inhibitory
{'spike_times': [500.0, 520.0]}, # proximal excitatory
{'spike_times': [510.0, 530.0]}, # proximal inhibitory
{'spike_times': [400.0, 420.0]}, # distal excitatory
{'spike_times': [410.0, 430.0]}]) # distal inhibitory
# Connect generators to correct compartments
nest.Connect([sgs[0]], n, params = {'receptor_type': syns['soma_exc']})
nest.Connect([sgs[1]], n, params = {'receptor_type': syns['soma_inh']})
nest.Connect([sgs[2]], n, params = {'receptor_type': syns['proximal_exc']})
nest.Connect([sgs[3]], n, params = {'receptor_type': syns['proximal_inh']})
nest.Connect([sgs[4]], n, params = {'receptor_type': syns['distal_exc']})
nest.Connect([sgs[5]], n, params = {'receptor_type': syns['distal_inh']})
# Simulate
nest.Simulate(700)
# Now turn on intrinsic current in soma to make neuron spike
nest.SetStatus(n, {'soma': {'I_e': 150.0}})
nest.Simulate(300)
# Retrieve data
rec = nest.GetStatus(mm)[0]['events']
t = rec['times']
# Plot potential traces
pl.figure()
pl.subplot(211)
pl.plot(t, rec['V_m.s'], t, rec['V_m.p'], t, rec['V_m.d'])
pl.legend(('Soma', 'Proximal dendrite', 'Distal dendrite'),loc='lower right')
pl.axis([0, 1000, -76, -59])
pl.ylabel('Membrane potential [mV]')
pl.title('Responses of iaf_cond_alpha_mc neuron')
# Plot conductance traces
pl.subplot(212)
pl.plot(t, rec['g_ex.s'], 'b-', t, rec['g_ex.p'], 'g-', t, rec['g_ex.d'], 'r-')
pl.plot(t, rec['g_in.s'], 'b--', t, rec['g_in.p'], 'g--', t, rec['g_in.d'], 'r--')
pl.legend(('g_ex.s', 'g_ex.p', 'g_in.d','g_in.s', 'g_in.p', 'g_in.d'))
pl.axis([350, 700, 0, 1.15])
pl.xlabel('Time [ms]')
pl.ylabel('Synaptic conductance [nS]')
| gpl-2.0 |
dubvulture/pyku | pyku/test/test_groundtruth.py | 1 | 2794 | # coding=utf-8
import logging
import os
import sys
import numpy as np
import pyku
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
def read_groundtruth():
ret = []
with open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'groundtruth.txt'), 'rb') as lines:
for line in lines:
ret.append(line[:-2])
return np.array(ret)
def test_groundtruth(standard=True):
# Read all pictures
folder = pyku.utils.FOLDER
pics = sorted([os.path.join(folder, pic)
for pic in os.listdir(folder)
if os.path.isfile(os.path.join(folder, pic))])
# Read groundtruth
groundtruth = read_groundtruth()
if standard:
# Standard raw pixel data
model = pyku.DigitClassifier()
else:
# Zoning data
pyku.utils.DSIZE = 28.
model = pyku.DigitClassifier(
saved_model=pyku.utils.TRAIN_DATA + 'zoning_data.npz',
feature=pyku.DigitClassifier._zoning)
preds = []
# How many images
n = 44
for i in range(n):
pic = pics[i]
im = pyku.Sudoku(pic, classifier=model)
preds.append(im.extract(label_tries=3, debug=True))
preds = np.array(preds)
res = np.equal(groundtruth[:n], preds)
correct = np.size(res[res])
nogrid = np.size(preds[np.equal(preds, None)])
logging.info('Correct: %d', correct)
logging.info('No grid: %d', nogrid)
logging.info('Wrong digits: %d', (n - correct) - nogrid)
w_pos = 0
y_true = []
y_pred = []
for i in range(n):
pred = preds[i]
gt = groundtruth[i]
if pred is not None:
for j in range(81):
a = 0 if pred[j] == ' ' else int(pred[j])
b = 0 if gt[j] == ' ' else int(gt[j])
# Wrong position or noise
if (a == 0 and b != 0) or (a != 0 and b == 0):
w_pos += 1
                elif a != 0 and b != 0:
y_pred.append(a)
y_true.append(b)
logging.info('Wrong positions: %d', w_pos)
y_true = np.array(y_true)
y_pred = np.array(y_pred)
try:
from sklearn.metrics import precision_score, recall_score, accuracy_score
from sklearn.metrics import confusion_matrix
logging.info(confusion_matrix(y_true, y_pred))
recall = recall_score(y_true, y_pred, average='macro')
precision = precision_score(y_true, y_pred, average='macro')
accuracy = accuracy_score(y_true, y_pred)
logging.info('Recall: %f', recall)
logging.info('Precision: %f', precision)
logging.info('Accuracy: %f', accuracy)
except:
pass
if __name__ == "__main__":
test_groundtruth()
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/_pylab_helpers.py | 8 | 4008 | """
Manage figures for pyplot interface.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import sys
import gc
import atexit
def error_msg(msg):
print(msg, file=sys.stderr)
class Gcf(object):
"""
Singleton to manage a set of integer-numbered figures.
This class is never instantiated; it consists of two class
attributes (a list and a dictionary), and a set of static
methods that operate on those attributes, accessing them
directly as class attributes.
Attributes:
*figs*:
dictionary of the form {*num*: *manager*, ...}
*_activeQue*:
list of *managers*, with active one at the end
"""
_activeQue = []
figs = {}
@classmethod
def get_fig_manager(cls, num):
"""
If figure manager *num* exists, make it the active
figure and return the manager; otherwise return *None*.
"""
manager = cls.figs.get(num, None)
if manager is not None:
cls.set_active(manager)
return manager
@classmethod
def destroy(cls, num):
"""
Try to remove all traces of figure *num*.
In the interactive backends, this is bound to the
window "destroy" and "delete" events.
"""
if not cls.has_fignum(num):
return
manager = cls.figs[num]
manager.canvas.mpl_disconnect(manager._cidgcf)
# There must be a good reason for the following careful
# rebuilding of the activeQue; what is it?
oldQue = cls._activeQue[:]
cls._activeQue = []
for f in oldQue:
if f != manager:
cls._activeQue.append(f)
del cls.figs[num]
manager.destroy()
gc.collect(1)
@classmethod
def destroy_fig(cls, fig):
"*fig* is a Figure instance"
num = None
for manager in six.itervalues(cls.figs):
if manager.canvas.figure == fig:
num = manager.num
break
if num is not None:
cls.destroy(num)
@classmethod
def destroy_all(cls):
        # this is needed to ensure that gc is available in corner cases
# where modules are being torn down after install with easy_install
import gc # noqa
for manager in list(cls.figs.values()):
manager.canvas.mpl_disconnect(manager._cidgcf)
manager.destroy()
cls._activeQue = []
cls.figs.clear()
gc.collect(1)
@classmethod
def has_fignum(cls, num):
"""
Return *True* if figure *num* exists.
"""
return num in cls.figs
@classmethod
def get_all_fig_managers(cls):
"""
Return a list of figure managers.
"""
return list(cls.figs.values())
@classmethod
def get_num_fig_managers(cls):
"""
Return the number of figures being managed.
"""
return len(cls.figs)
@classmethod
def get_active(cls):
"""
Return the manager of the active figure, or *None*.
"""
if len(cls._activeQue) == 0:
return None
else:
return cls._activeQue[-1]
@classmethod
def set_active(cls, manager):
"""
Make the figure corresponding to *manager* the active one.
"""
oldQue = cls._activeQue[:]
cls._activeQue = []
for m in oldQue:
if m != manager:
cls._activeQue.append(m)
cls._activeQue.append(manager)
cls.figs[manager.num] = manager
@classmethod
def draw_all(cls, force=False):
"""
Redraw all figures registered with the pyplot
state machine.
"""
for f_mgr in cls.get_all_fig_managers():
if force or f_mgr.canvas.figure.stale:
f_mgr.canvas.draw_idle()
atexit.register(Gcf.destroy_all)
| mit |
cython-testbed/pandas | pandas/core/arrays/period.py | 1 | 19598 | # -*- coding: utf-8 -*-
from datetime import timedelta
import warnings
import numpy as np
from pandas._libs import lib
from pandas._libs.tslib import NaT, iNaT
from pandas._libs.tslibs.period import (
Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX,
get_period_field_arr, period_asfreq_arr)
from pandas._libs.tslibs import period as libperiod
from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds, Timedelta
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas import compat
from pandas.util._decorators import (cache_readonly, deprecate_kwarg)
from pandas.core.dtypes.common import (
is_integer_dtype, is_float_dtype, is_period_dtype,
is_datetime64_dtype)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import ABCSeries
import pandas.core.common as com
from pandas.tseries import frequencies
from pandas.tseries.offsets import Tick, DateOffset
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = frequencies.get_freq_code(self.freq)
result = get_period_field_arr(alias, self._ndarray_values, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
def _period_array_cmp(cls, op):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
opname = '__{name}__'.format(name=op.__name__)
    nat_result = opname == '__ne__'
def wrapper(self, other):
op = getattr(self._ndarray_values, opname)
if isinstance(other, Period):
if other.freq != self.freq:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = op(other.ordinal)
elif isinstance(other, PeriodArrayMixin):
if other.freq != self.freq:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = op(other._ndarray_values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self._ndarray_values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
class PeriodArrayMixin(DatetimeLikeArrayMixin):
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
@cache_readonly
def dtype(self):
return PeriodDtype.construct_from_string(self.freq)
@property
def _ndarray_values(self):
# Ordinals
return self._data
@property
def asi8(self):
return self._ndarray_values.view('i8')
@property
def freq(self):
"""Return the frequency object if it is set, otherwise None"""
return self._freq
@freq.setter
def freq(self, value):
msg = ('Setting {cls}.freq has been deprecated and will be '
'removed in a future version; use {cls}.asfreq instead. '
'The {cls}.freq setter is not guaranteed to work.')
warnings.warn(msg.format(cls=type(self).__name__),
FutureWarning, stacklevel=2)
self._freq = value
# --------------------------------------------------------------------
# Constructors
_attributes = ["freq"]
def __new__(cls, values, freq=None, **kwargs):
if is_period_dtype(values):
# PeriodArray, PeriodIndex
if freq is not None and values.freq != freq:
raise IncompatibleFrequency(freq, values.freq)
freq = values.freq
values = values.asi8
elif is_datetime64_dtype(values):
# TODO: what if it has tz?
values = dt64arr_to_periodarr(values, freq)
return cls._simple_new(values, freq=freq, **kwargs)
@classmethod
def _simple_new(cls, values, freq=None, **kwargs):
"""
Values can be any type that can be coerced to Periods.
Ordinals in an ndarray are fastpath-ed to `_from_ordinals`
"""
if is_period_dtype(values):
freq = dtl.validate_dtype_freq(values.dtype, freq)
values = values.asi8
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if len(values) > 0 and is_float_dtype(values):
raise TypeError("{cls} can't take floats"
.format(cls=cls.__name__))
return cls(values, freq=freq, **kwargs)
return cls._from_ordinals(values, freq=freq, **kwargs)
@classmethod
def _from_ordinals(cls, values, freq=None, **kwargs):
"""
Values should be int ordinals
        `__new__` & `_simple_new` coerce to ordinals and call this method
"""
# **kwargs are included so that the signature matches PeriodIndex,
# letting us share _simple_new
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
if freq is None:
raise ValueError('freq is not specified and cannot be inferred')
result._freq = Period._maybe_convert_freq(freq)
return result
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com.count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
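    # Hedged illustration (not from the original source): the range constructor
    # accepts either endpoints or fields, never both, e.g.
    # >>> PeriodArrayMixin._generate_range('2000', '2001', None, 'M', {})
    # returns monthly ordinals and the resolved frequency, while passing both
    # endpoints and a non-empty ``fields`` dict raises ValueError.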
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10,
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9,
"The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11,
"The number of days in the month")
daysinmonth = days_in_month
@property
def is_leap_year(self):
""" Logical indicating if the date belongs to a leap year """
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how='start')
@property
def end_time(self):
return self.to_timestamp(how='end')
def asfreq(self, freq=None, how='E'):
"""
Convert the Period Array/Index to the specified frequency `freq`.
Parameters
----------
freq : str
a frequency
how : str {'E', 'S'}
'E', 'END', or 'FINISH' for end,
'S', 'START', or 'BEGIN' for start.
Whether the elements should be aligned to the end
        or start within a period. January 31st ('END') vs.
January 1st ('START') for example.
Returns
-------
new : Period Array/Index with the new frequency
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010, ..., 2015]
Length: 6, Freq: A-DEC
>>> pidx.asfreq('M')
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010-12, ..., 2015-12]
Length: 6, Freq: M
>>> pidx.asfreq('M', how='S')
<class 'pandas.core.indexes.period.PeriodIndex'>
[2010-01, ..., 2015-01]
Length: 6, Freq: M
"""
how = libperiod._validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = frequencies.get_freq_code(self.freq)
base2, mult2 = frequencies.get_freq_code(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == 'E'
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self.hasnans:
new_data[self._isnan] = iNaT
return self._shallow_copy(new_data, freq=freq)
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeArray/Index
Parameters
----------
freq : string or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays.datetimes import DatetimeArrayMixin
how = libperiod._validate_end_alias(how)
end = how == 'E'
if end:
if freq == 'B':
# roll forward to ensure we land on B date
adjust = Timedelta(1, 'D') - Timedelta(1, 'ns')
return self.to_timestamp(how='start') + adjust
else:
adjust = Timedelta(1, 'ns')
return (self + 1).to_timestamp(how='start') - adjust
if freq is None:
base, mult = frequencies.get_freq_code(self.freq)
freq = frequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = frequencies.get_freq_code(freq)
new_data = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_data._ndarray_values,
base)
return DatetimeArrayMixin(new_data, freq='infer')
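    # Illustrative sketch (an assumption, not taken from this file): for an
    # annual PeriodIndex, ``to_timestamp`` returns the first instant of each
    # period by default and the last nanosecond with ``how='end'``:
    # >>> pidx = pd.period_range('2010', periods=2, freq='A')
    # >>> pidx.to_timestamp()           # ['2010-01-01', '2011-01-01']
    # >>> pidx.to_timestamp(how='end')  # last nanosecond of 2010 and 2011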
# ------------------------------------------------------------------
# Arithmetic Methods
_create_comparison_method = classmethod(_period_array_cmp)
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
if self.freq != other.freq:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self.hasnans:
new_data[self._isnan] = NaT
return new_data
def _add_offset(self, other):
assert not isinstance(other, Tick)
base = frequencies.get_base_alias(other.rule_code)
if base != self.freq.rule_code:
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
return self._time_shift(other.n)
def _add_delta_td(self, other):
assert isinstance(other, (timedelta, np.timedelta64, Tick))
nanos = delta_to_nanoseconds(other)
own_offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(own_offset, Tick):
offset_nanos = delta_to_nanoseconds(own_offset)
if np.all(nanos % offset_nanos == 0):
return self._time_shift(nanos // offset_nanos)
# raise when input doesn't have freq
raise IncompatibleFrequency("Input has different freq from "
"{cls}(freq={freqstr})"
.format(cls=type(self).__name__,
freqstr=self.freqstr))
def _add_delta(self, other):
ordinal_delta = self._maybe_convert_timedelta(other)
return self._time_shift(ordinal_delta)
@deprecate_kwarg(old_arg_name='n', new_arg_name='periods')
def shift(self, periods):
"""
Shift index by desired number of increments.
This method is for shifting the values of period indexes
by a specified time increment.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
Returns
-------
pandas.PeriodIndex
Shifted index.
See Also
--------
DatetimeIndex.shift : Shift values of DatetimeIndex.
"""
return self._time_shift(periods)
def _time_shift(self, n):
values = self._ndarray_values + n * self.freq.n
if self.hasnans:
values[self._isnan] = iNaT
return self._shallow_copy(values=values)
def _maybe_convert_timedelta(self, other):
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(
other, (timedelta, np.timedelta64, Tick, np.ndarray)):
offset = frequencies.to_offset(self.freq.rule_code)
if isinstance(offset, Tick):
if isinstance(other, np.ndarray):
nanos = np.vectorize(delta_to_nanoseconds)(other)
else:
nanos = delta_to_nanoseconds(other)
offset_nanos = delta_to_nanoseconds(offset)
check = np.all(nanos % offset_nanos == 0)
if check:
return nanos // offset_nanos
elif isinstance(other, DateOffset):
freqstr = other.rule_code
base = frequencies.get_base_alias(freqstr)
if base == self.freq.rule_code:
return other.n
msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
elif lib.is_integer(other):
# integer is passed to .shift via
# _add_datetimelike_methods basically
# but ufunc may pass integer to _add_delta
return other
# raise when input doesn't have freq
msg = "Input has different freq from {cls}(freq={freqstr})"
raise IncompatibleFrequency(msg.format(cls=type(self).__name__,
freqstr=self.freqstr))
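    # Hedged example of the conversion above (``parr`` is an illustrative
    # daily-frequency Period array, not defined in this file): a Timedelta
    # that is a whole multiple of one day converts to an integer number of
    # periods, anything else raises IncompatibleFrequency.
    # >>> parr._maybe_convert_timedelta(pd.Timedelta(days=3))   # -> 3
    # >>> parr._maybe_convert_timedelta(pd.Timedelta(hours=5))  # raises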
PeriodArrayMixin._add_comparison_ops()
PeriodArrayMixin._add_datetimelike_methods()
# -------------------------------------------------------------------
# Constructor Helpers
def dt64arr_to_periodarr(data, freq, tz=None):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = frequencies.get_freq_code(freq)
return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz)
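# Rough usage sketch (assumed, not part of the original file): converting a
# datetime64[ns] array to monthly period ordinals.
# >>> stamps = np.array(['2011-01-15', '2011-02-20'], dtype='M8[ns]')
# >>> dt64arr_to_periodarr(stamps, 'M')  # int64 ordinals for 2011-01, 2011-02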
def _get_ordinal_range(start, end, periods, freq, mult=1):
if com.count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
if freq is not None:
_, mult = frequencies.get_freq_code(freq)
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('start and end must have same freq')
if (start is NaT or end is NaT):
raise ValueError('start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
periods = periods * mult
if start is None:
data = np.arange(end.ordinal - periods + mult,
end.ordinal + 1, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods, mult,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = frequencies.get_freq_code(freq)
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in compat.zip(year, quarter):
y, m = libperiod.quarter_to_myear(y, q, freq)
val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = frequencies.get_freq_code(freq)
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in compat.zip(*arrays):
ordinals.append(libperiod.period_ordinal(
y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
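# Hedged sketch of the field-based constructor path above (illustrative only,
# values are assumptions): quarterly ordinals built from year/quarter fields
# can be wrapped back into a Period array via ``_from_ordinals``.
# >>> ordinals, freq = _range_from_fields(year=[2000, 2000],
# ...                                     quarter=[1, 2], freq='Q')
# >>> PeriodArrayMixin._from_ordinals(ordinals, freq=freq)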
| bsd-3-clause |
Myasuka/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
    X = X + 0.5 - center
    Y = Y + 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
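# Hedged illustration of the operator shape (numbers are illustrative): for an
# 8x8 image sampled along 4 angles the design matrix maps the 64 flattened
# pixels onto 4 * 8 = 32 projection bins.
# >>> op = build_projection_operator(8, 4)
# >>> op.shape   # (32, 64)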
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
    n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
pprett/statsmodels | docs/source/conf.py | 1 | 10296 | # -*- coding: utf-8 -*-
#
# statsmodels documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 22 11:17:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
'ipython_console_highlighting',
'ipython_directive',
'numpy_ext.numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'statsmodels'
copyright = u'2009-2011,Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
autosummary_generate = True
autoclass_content = 'class'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from statsmodels.version import version, full_version
release = version
# The full version, including dev tag.
version = full_version
# set inheritance_graph_attrs
# you need graphviz installed to use this
# see: http://sphinx.pocoo.org/ext/inheritance.html
# and graphviz dot documentation http://www.graphviz.org/content/attrs
#NOTE: giving the empty string to size allows graphviz to figure out
# the size
inheritance_graph_attrs = dict(size='""', ratio="compress", fontsize=14,
rankdir="LR")
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
# color='dodgerblue1', style='filled')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*/autosummary/class.rst', '*/autosummary/glmfamilies.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if 'htmlhelp' in sys.argv:
#html_theme = 'statsmodels_htmlhelp' #doesn't look nice yet
html_theme = 'default'
    print('################# using statsmodels_htmlhelp ############')
else:
html_theme = 'statsmodels'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/statsmodels_hybi_banner.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/statsmodels_hybi_favico.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index' : ['indexsidebar.html','searchbox.html','sidelinks.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'statsmodelsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'statsmodels.tex', u'statsmodels Documentation',
u'Josef Perktold, Skipper Seabold', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'statsmodels', u'statsmodels Documentation',
[u'Josef Perktold, Skipper Seabold, Jonathan Taylor'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'statsmodels'
epub_author = u'Josef Perktold, Skipper Seabold'
epub_publisher = u'Josef Perktold, Skipper Seabold'
epub_copyright = u'2009-2011, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'numpy' : ('http://docs.scipy.org/doc/numpy/', None),
'python' : ('http://docs.python.org/3.2', None),
'pydagogue' : ('http://matthew-brett.github.com/pydagogue/', None)}
from os.path import dirname, abspath, join
plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
| bsd-3-clause |
adammenges/statsmodels | statsmodels/examples/ex_multivar_kde.py | 34 | 1504 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
| bsd-3-clause |
apeyser/nest-simulator | pynest/examples/sinusoidal_gamma_generator.py | 3 | 11701 | # -*- coding: utf-8 -*-
#
# sinusoidal_gamma_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
'''
Sinusoidal gamma generator example
----------------------------------
This script demonstrates the use of the `sinusoidal_gamma_generator`
and its different parameters and modes. The source code of the model
can be found in models/sinusoidal_gamma_generator.h.
The script is structured into two parts, each of which generates its
own figure. In part 1A, two generators are created with different
orders of the underlying gamma process and their resulting PST
(Peristimulus time) and ISI (Inter-spike interval) histograms are
plotted. Part 1B illustrates the effect of the
``individual_spike_trains`` switch. In Part 2, the effects of
different settings for rate, phase and frequency are demonstrated.
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from iPython
'''
We first create a figure for the plot and set the resolution of NEST.
'''
plt.figure()
nest.SetKernelStatus({'resolution': 0.01})
'''
Then we create two instances of the `sinusoidal_gamma_generator`
with two different orders of the underlying gamma process using
`Create`. Moreover, we create devices to record firing rates
(`multimeter`) and spikes (`spike_detector`) and connect them to the
generators using `Connect`.
'''
g = nest.Create('sinusoidal_gamma_generator', n=2,
params=[{'rate': 10000.0, 'amplitude': 5000.0,
'frequency': 10.0, 'phase': 0.0, 'order': 2.0},
{'rate': 10000.0, 'amplitude': 5000.0,
'frequency': 10.0, 'phase': 0.0, 'order': 10.0}])
m = nest.Create('multimeter', n=2, params={'interval': 0.1, 'withgid': False,
'record_from': ['rate']})
s = nest.Create('spike_detector', n=2, params={'withgid': False})
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
nest.Simulate(200)
'''
After simulating, the spikes are extracted from the
`spike_detector` using `GetStatus` and plots are created with panels
for the PST and ISI histograms.
'''
colors = ['b', 'g']
for j in range(2):
ev = nest.GetStatus([m[j]])[0]['events']
t = ev['times']
r = ev['rate']
sp = nest.GetStatus([s[j]])[0]['events']['times']
plt.subplot(221)
h, e = np.histogram(sp, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(sp), bins=np.arange(0., 0.505, 0.01),
histtype='step', color=colors[j])
plt.title('ISI histogram')
'''
The kernel is reset and the number of threads set to 4.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
'''
First, a `sinusoidal_gamma_generator` with
`individual_spike_trains` set to ``True`` is created and connected to
20 parrot neurons whose spikes are recorded by a spike detector. After
simulating, a raster plot of the spikes is created.
'''
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
'''
The kernel is reset again and the whole procedure is repeated for
a `sinusoidal_gamma_generator` with `individual_spike_trains` set to ``False``.
The plot shows that in this case, all neurons receive the same spike train from
the `sinusoidal_gamma_generator`.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_gamma_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0, 'order': 3.,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_detector')
nest.Connect(g, p)
nest.Connect(p, s)
nest.Simulate(200)
ev = nest.GetStatus(s)[0]['events']
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
'''
In part 2, multiple generators are created with different settings
for rate, phase and frequency. First, we define an auxiliary function
which simulates ``n`` generators for ``t`` ms. After ``t/2``, the
parameter dictionary of the generators is changed from initial to
after.
'''
def step(t, n, initial, after, seed=1, dt=0.05):
"""Simulates for n generators for t ms. Step at t/2."""
nest.ResetKernel()
nest.SetStatus([0], [{"resolution": dt}])
nest.SetStatus([0], [{"grng_seed": 256 * seed + 1}])
nest.SetStatus([0], [{"rng_seeds": [256 * seed + 2]}])
g = nest.Create('sinusoidal_gamma_generator', n, params=initial)
sd = nest.Create('spike_detector')
nest.Connect(g, sd)
nest.Simulate(t / 2)
nest.SetStatus(g, after)
nest.Simulate(t / 2)
return nest.GetStatus(sd, 'events')[0]
'''
This function serves to plot a histogram of the emitted spikes.
'''
def plot_hist(spikes):
plt.hist(spikes['times'],
bins=np.arange(0., max(spikes['times']) + 1.5, 1.),
histtype='step')
t = 1000
n = 1000
dt = 1.0
steps = int(t / dt)
offset = t / 1000. * 2 * np.pi
'''
We create a figure with a 2x3 grid.
'''
grid = (2, 3)
fig = plt.figure(figsize=(15, 10))
'''
Simulate a `sinusoidal_gamma_generator` with default parameter
values, i.e. ac=0 and the DC value being changed from 20 to 50 after
``t/2`` and plot the number of spikes per second over time.
'''
plt.subplot(grid[0], grid[1], 1)
spikes = step(t, n,
{'rate': 20.0},
{'rate': 50.0, },
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(steps)
exp[:int(steps / 2)] *= 20
exp[int(steps / 2):] *= 50
plt.plot(exp, 'r')
plt.title('DC rate: 20 -> 50')
plt.ylabel('Spikes per second')
'''
Simulate a `sinusoidal_gamma_generator` with the DC value being
changed from 80 to 40 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 2)
spikes = step(t, n,
{'order': 6.0, 'rate': 80.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
{'order': 6.0, 'rate': 40.0, 'amplitude': 0.,
'frequency': 0., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.ones(steps)
exp[:int(steps / 2)] *= 80
exp[int(steps / 2):] *= 40
plt.plot(exp, 'r')
plt.title('DC rate: 80 -> 40')
'''
Simulate a `sinusoidal_gamma_generator` with the AC value being
changed from 40 to 20 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 3)
spikes = step(t, n,
{'order': 3.0, 'rate': 40.0, 'amplitude': 40.,
'frequency': 10., 'phase': 0.},
{'order': 3.0, 'rate': 40.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (40. +
40. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[int(steps / 2):] = (40. + 20. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 40 -> 20')
'''
Simulate a `sinusoidal_gamma_generator` with both the DC rate and the
modulation amplitude being changed from 20 to 50 after ``t/2`` and plot
the number of spikes per second over time.
'''
plt.subplot(grid[0], grid[1], 4)
spikes = step(t, n,
{'order': 6.0, 'rate': 20.0, 'amplitude': 20.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 50.0, 'amplitude': 50.,
'frequency': 10., 'phase': 0.},
seed=123, dt=dt)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (20. + 20. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[int(steps / 2):] = (50. + 50. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) + offset))
plt.plot(exp, 'r')
plt.title('DC Rate and Rate Modulation: 20 -> 50')
plt.ylabel('Spikes per second')
plt.xlabel('Time [ms]')
'''
Simulate a `sinusoidal_gamma_generator` with the AC value being
changed from 0 to 40 after ``t/2`` and plot the number of spikes per
second over time.
'''
plt.subplot(grid[0], grid[1], 5)
spikes = step(t, n,
{'rate': 40.0, },
{'amplitude': 40.0, 'frequency': 20.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = 40. * np.ones(int(steps / 2))
exp[int(steps / 2):] = (40. + 40. * np.sin(np.arange(0, t / 1000. * np.pi * 20,
t / 1000. * np.pi * 20. /
(steps / 2))))
plt.plot(exp, 'r')
plt.title('Rate Modulation: 0 -> 40')
plt.xlabel('Time [ms]')
'''
Simulate a `sinusoidal_gamma_generator` with a phase shift at
``t/2`` and plot the number of spikes per second over time.
'''
# Phase shift
plt.subplot(grid[0], grid[1], 6)
spikes = step(t, n,
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 0.},
{'order': 6.0, 'rate': 60.0, 'amplitude': 60.,
'frequency': 10., 'phase': 180.},
seed=123, dt=1.)
plot_hist(spikes)
exp = np.zeros(int(steps))
exp[:int(steps / 2)] = (60. + 60. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2))))
exp[int(steps / 2):] = (60. + 60. * np.sin(np.arange(0, t / 1000. * np.pi * 10,
t / 1000. * np.pi * 10. /
(steps / 2)) +
offset + np.pi))
plt.plot(exp, 'r')
plt.title('Modulation Phase: 0 -> Pi')
plt.xlabel('Time [ms]')
| gpl-2.0 |
guitarmanj/king-phisher | king_phisher/client/dialogs/configuration.py | 2 | 15230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/configuration.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import logging
import os
import string
import urllib.parse
from king_phisher import its
from king_phisher import utilities
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from gi.repository import GObject
from gi.repository import Gtk
__all__ = ('ConfigurationDialog',)
if its.mocked:
_Gtk_Frame = type('Gtk.Frame', (object,), {'__module__': ''})
else:
_Gtk_Frame = Gtk.Frame
OptionWidget = collections.namedtuple('OptionWidget', ('option', 'widget'))
class PluginsConfigurationFrame(_Gtk_Frame):
def __init__(self, application, plugin_klass):
super(PluginsConfigurationFrame, self).__init__()
self.application = application
self.config = application.config
self.plugin_klass = plugin_klass
self.option_widgets = {}
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
		plugin_config = self.config['plugins'].get(plugin_klass.name) or {}  # use `or` so a stored value of None falls back to an empty dict
grid = Gtk.Grid()
self.add(grid)
grid.set_property('margin-start', 12)
grid.set_property('column-spacing', 3)
grid.set_property('hexpand', True)
grid.set_property('row-spacing', 3)
grid.insert_column(0)
grid.insert_column(0)
grid.attach(self._get_title_box(), 0, 0, 2, 1)
for row, opt in enumerate(plugin_klass.options, 1):
grid.insert_row(row)
name_label = Gtk.Label()
name_label.set_property('tooltip-text', opt.description)
name_label.set_property('width-request', 175)
name_label.set_text(opt.display_name)
grid.attach(name_label, 0, row, 1, 1)
widget = opt.get_widget(self.application, plugin_config.get(opt.name, opt.default))
widget.set_property('tooltip-text', opt.description)
grid.attach(widget, 1, row, 1, 1)
self.option_widgets[opt.name] = OptionWidget(opt, widget)
self.show_all()
def _get_title_box(self):
menu = Gtk.Menu()
menu.set_property('valign', Gtk.Align.START)
menu_item = Gtk.MenuItem.new_with_label('Restore Default Options')
menu_item.connect('activate', self.signal_activate_plugin_reset, self.plugin_klass)
menu.append(menu_item)
menu.show_all()
self.menu = menu
plugin_menu_button = Gtk.MenuButton()
plugin_menu_button.set_property('direction', Gtk.ArrowType.LEFT)
plugin_menu_button.set_popup(menu)
title_box = Gtk.Box(Gtk.Orientation.HORIZONTAL, 3)
title_box.pack_start(Gtk.Label(label=self.plugin_klass.title), False, True, 0)
title_box.pack_end(plugin_menu_button, False, False, 0)
return title_box
def signal_activate_plugin_reset(self, _, plugin_klass):
self.logger.info("restoring the default options for plugin: {0}".format(plugin_klass.name))
default_config = {}
for option_widget in self.option_widgets.values():
option = option_widget.option
widget = option_widget.widget
default_config[option.name] = option.default
option.set_widget_value(widget, option.default)
self.application.config['plugins'][plugin_klass.name] = default_config
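# Hedged sketch (illustrative, not part of the original file): a frame is built
# per plugin class and its widgets can be reached through ``option_widgets``;
# ``application``, ``plugin_klass`` and ``'some_option'`` are assumptions here.
# >>> frame = PluginsConfigurationFrame(application, plugin_klass)
# >>> frame.option_widgets['some_option'].widget  # the Gtk widget for that option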
class ConfigurationDialog(gui_utilities.GladeGObject):
"""
Display the King Phisher client configuration dialog. Running this dialog
via the :py:meth:`.interact` method will cause some server settings to be
loaded.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
# Server Tab
'entry_server',
'entry_server_username',
'entry_email_address',
'entry_sms_phone_number',
'combobox_sms_carrier',
# SMTP Server Tab
'entry_smtp_server',
'entry_smtp_username',
'frame_smtp_ssh',
'spinbutton_smtp_max_send_rate',
'switch_smtp_ssl_enable',
'switch_smtp_ssh_enable',
'entry_sftp_client',
'entry_ssh_server',
'entry_ssh_username',
# Client Tab
'combobox_spf_check_level',
# Plugins Tab
'box_plugin_options'
),
top_level=(
'SMSCarriers',
'SMTPSendRate',
'SPFCheckLevels'
)
)
top_gobject = 'dialog'
def __init__(self, *args, **kwargs):
super(ConfigurationDialog, self).__init__(*args, **kwargs)
smtp_ssh_enabled = self.gobjects['switch_smtp_ssh_enable'].get_active()
self.gobjects['entry_smtp_server'].set_sensitive(not smtp_ssh_enabled)
self.gobjects['frame_smtp_ssh'].set_sensitive(smtp_ssh_enabled)
# connect to the signal here so the settings can be loaded with modifications
self.gobjects['switch_smtp_ssh_enable'].connect('notify::active', self.signal_switch_smtp_ssh)
self._plugin_option_widgets = collections.defaultdict(dict)
checkbutton = self.gtk_builder_get('checkbutton_pip_install_dependencies')
checkbutton.set_active(self.config['plugins.pip.install_dependencies'])
def signal_switch_smtp_ssh(self, switch, _):
active = switch.get_property('active')
entry = self.gobjects['entry_smtp_server']
self.gtk_builder_get('frame_smtp_ssh').set_sensitive(active)
if active:
entry.set_sensitive(False)
current_text = entry.get_text()
if current_text.startswith('!'):
entry.set_text(current_text[1:])
else:
entry.set_text('localhost:25')
else:
entry.set_sensitive(True)
def signal_toggle_alert_subscribe(self, cbutton):
active = cbutton.get_property('active')
if active:
remote_method = 'campaign/alerts/subscribe'
else:
remote_method = 'campaign/alerts/unsubscribe'
self.application.rpc(remote_method, self.config['campaign_id'])
def signal_toggle_reject_after_credentials(self, cbutton):
max_credentials = (1 if cbutton.get_property('active') else None)
self.application.rpc('db/table/set', 'campaigns', self.config['campaign_id'], 'max_credentials', max_credentials)
def signal_changed_spf_check_level(self, combobox):
ti = combobox.get_active_iter()
if not ti:
return
model = combobox.get_model()
label = self.gtk_builder_get('label_spf_level_description')
level_description = model[ti][2]
label.set_text(level_description)
def _configure_settings_dashboard(self):
if not graphs.has_matplotlib:
self.gtk_builder_get('frame_dashboard').set_sensitive(False)
return
graph_providers = Gtk.ListStore(str, str)
for graph in graphs.get_graphs():
graph = graphs.get_graph(graph)
graph_providers.append([graph.name_human, graph.name])
for dash_position in ['top_left', 'top_right', 'bottom']:
combobox = self.gtk_builder_get('combobox_dashboard_' + dash_position)
combobox.set_model(graph_providers)
ti = gui_utilities.gtk_list_store_search(graph_providers, self.config.get('dashboard.' + dash_position), column=1)
combobox.set_active_iter(ti)
def _configure_settings_plugin_options(self, plugin_klass):
frame = PluginsConfigurationFrame(self.application, plugin_klass)
self.gobjects['box_plugin_options'].pack_start(frame, True, True, 0)
self._plugin_option_widgets[plugin_klass.name] = frame.option_widgets
def _configure_settings_plugins(self):
pm = self.application.plugin_manager
plugin_klasses = [klass for _, klass in pm if klass.options and klass.is_compatible]
plugin_klasses = sorted(plugin_klasses, key=lambda k: k.title)
for plugin_klass in plugin_klasses:
self._configure_settings_plugin_options(plugin_klass)
def _configure_settings_proxy(self):
if not self.config['proxy.url']:
return
formatted_proxy_url = urllib.parse.urlparse(self.config['proxy.url'])
netloc = formatted_proxy_url.netloc
if formatted_proxy_url.username or formatted_proxy_url.password:
if formatted_proxy_url.port:
netloc = '{}:{}'.format(formatted_proxy_url.hostname, formatted_proxy_url.port)
else:
netloc = formatted_proxy_url.hostname
self.gtk_builder_get('entry_proxy_username').set_text(formatted_proxy_url.username)
self.gtk_builder_get('entry_proxy_password').set_text(formatted_proxy_url.password)
proxy_url = urllib.parse.urlunparse((formatted_proxy_url.scheme, netloc, formatted_proxy_url.path, '', '', ''))
self.gtk_builder_get('entry_proxy_url').set_text(proxy_url)
def _configure_settings_server(self):
cb_subscribed = self.gtk_builder_get('checkbutton_alert_subscribe')
cb_reject_after_creds = self.gtk_builder_get('checkbutton_reject_after_credentials')
entry_beef_hook = self.gtk_builder_get('entry_server_beef_hook')
server_config = self.application.rpc('config/get', ['beef.hook_url', 'server.require_id', 'server.secret_id'])
entry_beef_hook.set_property('text', server_config.get('beef.hook_url', ''))
self.config['server_config']['server.require_id'] = server_config['server.require_id']
self.config['server_config']['server.secret_id'] = server_config['server.secret_id']
# older versions of GObject.signal_handler_find seem to have a bug which cause a segmentation fault in python
if GObject.pygobject_version < (3, 10):
cb_subscribed.set_property('active', self.application.rpc('campaign/alerts/is_subscribed', self.config['campaign_id']))
cb_reject_after_creds.set_property('active', self.application.get_graphql_campaign()['maxCredentials'])
else:
with gui_utilities.gobject_signal_blocked(cb_subscribed, 'toggled'):
cb_subscribed.set_property('active', self.application.rpc('campaign/alerts/is_subscribed', self.config['campaign_id']))
cb_reject_after_creds.set_property('active', self.application.get_graphql_campaign()['maxCredentials'])
cb_reject_after_creds.set_sensitive(self.config['server_config']['server.require_id'])
	def _finalize_settings_dashboard(self):
dashboard_changed = False
for dash_position in ['top_left', 'top_right', 'bottom']:
combobox = self.gtk_builder_get('combobox_dashboard_' + dash_position)
ti = combobox.get_active_iter()
if not ti:
continue
graph_providers = combobox.get_model()
graph_name = graph_providers[ti][1]
if self.config.get('dashboard.' + dash_position) == graph_name:
continue
self.config['dashboard.' + dash_position] = graph_name
dashboard_changed = True
if dashboard_changed:
gui_utilities.show_dialog_info('The dashboard layout has been updated.', self.parent, 'The new settings will be applied the next time the application starts.')
def interact(self):
self._configure_settings_dashboard()
self._configure_settings_plugins()
self._configure_settings_server()
self._configure_settings_proxy()
self.gtk_builder_get('combobox_spf_check_level').emit('changed')
self.dialog.show_all()
response = self.dialog.run()
if response != Gtk.ResponseType.CANCEL:
self.objects_save_to_config()
self.save_proxy_settings()
self.save_plugin_options()
self.save_alert_settings()
entry_beef_hook = self.gtk_builder_get('entry_server_beef_hook')
self.application.rpc('config/set', {'beef.hook_url': entry_beef_hook.get_property('text').strip()})
if graphs.has_matplotlib:
				self._finalize_settings_dashboard()
self.dialog.destroy()
return response
def save_proxy_settings(self):
proxy_url = urllib.parse.urlparse(self.gtk_builder_get('entry_proxy_url').get_text().strip())
proxy_username = self.gtk_builder_get('entry_proxy_username').get_text().strip()
proxy_password = self.gtk_builder_get('entry_proxy_password').get_text().strip()
if not proxy_url.geturl():
self.config['proxy.url'] = None
os.environ.pop('HTTP_PROXY', None)
os.environ.pop('HTTPS_PROXY', None)
return
if not (proxy_url.hostname and proxy_url.scheme):
gui_utilities.show_dialog_warning('Invalid Proxy Settings', self.parent, 'The proxy url you have submitted is not valid.')
return
try:
proxy_url.port
except ValueError:
gui_utilities.show_dialog_warning('Invalid Proxy Settings', self.parent, 'The port must be an integer between 1-65535 inclusive.')
return
netloc = proxy_url.netloc
if proxy_username:
netloc = '{}:{}@{}'.format(proxy_username, proxy_password, proxy_url.netloc)
formatted_proxy_url = urllib.parse.urlunparse((proxy_url.scheme, netloc, proxy_url.path, '', '', ''))
self.config['proxy.url'] = formatted_proxy_url
os.environ['HTTP_PROXY'] = formatted_proxy_url
os.environ['HTTPS_PROXY'] = formatted_proxy_url
def save_plugin_options(self):
for name, option_widgets in self._plugin_option_widgets.items():
if name not in self.config['plugins']:
self.config['plugins'][name] = {}
			plugin_config = self.config['plugins'][name]
for option_name, option_widget in option_widgets.items():
plugin_config[option_name] = option_widget.option.get_widget_value(option_widget.widget)
checkbutton = self.gtk_builder_get('checkbutton_pip_install_dependencies')
self.config['plugins.pip.install_dependencies'] = checkbutton.get_active()
def save_alert_settings(self):
email_address = gui_utilities.gobject_get_value(self.gobjects['entry_email_address'])
phone_number = gui_utilities.gobject_get_value(self.gobjects['entry_sms_phone_number'])
sms_carrier = gui_utilities.gobject_get_value(self.gobjects['combobox_sms_carrier'])
server_user = self.application.server_user
if email_address and not utilities.is_valid_email_address(email_address):
gui_utilities.show_dialog_warning('Invalid Email Address', self.parent, 'The email address you have entered is not valid.')
return
if phone_number:
phone_number = ''.join(d for d in phone_number if d in string.digits)
if len(phone_number) > 11:
gui_utilities.show_dialog_warning('Invalid Phone Number', self.parent, 'The phone number must not contain more than 11 digits')
return
email_address = utilities.nonempty_string(email_address)
phone_number = utilities.nonempty_string(phone_number)
sms_carrier = utilities.nonempty_string(sms_carrier)
self.application.rpc('db/table/set', 'users', server_user.id, ('email_address', 'phone_number', 'phone_carrier'), (email_address, phone_number, sms_carrier))
| bsd-3-clause |
jaeilepp/eggie | mne/viz/utils.py | 2 | 12829 | """Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import math
from copy import deepcopy
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
'#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k', exci='k', ias='k', syst='k'),
units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU'),
scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0),
scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
eog=150e-6, ecg=5e-4, emg=1e-3,
ref_meg=1e-12, misc=1e-3,
stim=1, resp=1, chpi=1e-4, exci=1,
ias=1, syst=1),
ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
eeg=(-200., 200.), misc=(-5., 5.)),
titles=dict(eeg='EEG', grad='Gradiometers',
mag='Magnetometers', misc='misc'),
mask_params=dict(marker='o',
markerfacecolor='w',
markeredgecolor='k',
linewidth=0,
markeredgewidth=1,
markersize=4))
def _mutable_defaults(*mappings):
""" To avoid dicts as default keyword arguments
Use this function instead to resolve default dict values.
Example usage:
    scalings, units = _mutable_defaults(('scalings', scalings),
                                        ('units', units))
"""
out = []
for k, v in mappings:
this_mapping = DEFAULTS[k]
if v is not None:
this_mapping = deepcopy(DEFAULTS[k])
this_mapping.update(v)
out += [this_mapping]
return out
def _setup_vmin_vmax(data, vmin, vmax):
    """Aux function to handle vmin and vmax parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
        elif vmax is None:
vmax = np.max(data)
return vmin, vmax
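# Minimal usage sketch for _setup_vmin_vmax (added for illustration; the data
# values below are made up):
#
#   >>> data = np.array([-3., 1., 2.])
#   >>> _setup_vmin_vmax(data, None, None)   # -> (-3.0, 3.0), symmetric limits
#   >>> _setup_vmin_vmax(data, 0., np.max)   # -> (0.0, 2.0), callables are applied to data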
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
""" Adjust subplot parameters to give specified padding.
Note. For plotting please use this function instead of plt.tight_layout
Parameters
----------
pad : float
padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
try: # see https://github.com/matplotlib/matplotlib/issues/2654
fig.canvas.draw()
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
except:
msg = ('Matplotlib function \'tight_layout\'%s.'
               ' Skipping subplot adjustment.')
if not hasattr(plt, 'tight_layout'):
case = ' is not available'
else:
case = (' is not supported by your backend: `%s`'
% plt.get_backend())
warn(msg % case)
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all([p['active'] for p in container.info['projs']]):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze
Parameters
----------
limits : list (or array) of length 3
Bounds for the colormap.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of matplotlib.pyplot.colormap | array
A teal->blue->gray->red->yellow colormap.
Notes
-----
    This will return a colormap that displays correctly for data that are
    scaled by the plotting function to span [-fmax, fmax].
Examples
--------
The following code will plot a STC using standard MNE limits:
colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
"""
l = np.asarray(limits, dtype='float')
if len(l) != 3:
raise ValueError('limits must have 3 elements')
if any(l < 0):
raise ValueError('limits must all be positive')
if any(np.diff(l) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
cdict = {'red': ((l[0], 0.0, 0.0),
(l[1], 0.0, 0.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 1.0, 1.0),
(l[5], 1.0, 1.0)),
'green': ((l[0], 1.0, 1.0),
(l[1], 0.0, 0.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 0.0, 0.0),
(l[5], 1.0, 1.0)),
'blue': ((l[0], 1.0, 1.0),
(l[1], 1.0, 1.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 0.0, 0.0),
(l[5], 0.0, 0.0))}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_opts'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_opts'])
del params['proj_checks']
params['fig_opts'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
    if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _prepare_trellis(n_cells, max_col):
"""Aux function
"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
ax.set_visible(False)
return fig, axes
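# Usage sketch (illustrative only): laying out 5 cells with at most 3 columns
# gives a 2 x 3 grid in which the unused sixth axis is hidden:
#
#   >>> fig, axes = _prepare_trellis(5, max_col=3)
#   >>> len(axes)   # -> 6, with axes[5] made invisible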
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
import matplotlib as mpl
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
[True] * len(params['projs']))
width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
height = len(projs) / 6.0 + 0.5
fig_proj = figure_nobar(figsize=(width, height))
fig_proj.canvas.set_window_title('SSP projection vectors')
ax_temp = plt.axes((0, 0, 1, 1))
ax_temp.get_yaxis().set_visible(False)
ax_temp.get_xaxis().set_visible(False)
fig_proj.add_axes(ax_temp)
proj_checks = mpl.widgets.CheckButtons(ax_temp, labels=labels,
actives=actives)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active'] is True:
for x in proj_checks.lines[ii]:
x.set_color('r')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
# this should work for non-test cases
try:
fig_proj.canvas.draw()
fig_proj.show()
except Exception:
pass
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'w')
else:
f = tempfile.NamedTemporaryFile('w', delete=False)
fname_out = f.name
with f as fid:
fid.write(diff)
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar"""
import matplotlib.pyplot as plt
import matplotlib as mpl
old_val = mpl.rcParams['toolbar']
try:
mpl.rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
except Exception as ex:
raise ex
finally:
mpl.rcParams['toolbar'] = old_val
return fig
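# Usage sketch (illustrative): figure_nobar forwards its arguments to
# plt.figure, e.g.
#
#   >>> fig = figure_nobar(figsize=(4, 3))
#
# which creates an ordinary figure except that the toolbar is suppressed and
# the default key-press callbacks are disconnected.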
| bsd-2-clause |
Jannik029/terapy2 | test.py | 1 | 3580 | import TDSData as tds
import matplotlib.pyplot as plt
from TimeDomainData import TimeDomainData
import numpy as np
from OneLayerSystem import calculate_thickness
from OneLayerSystem import calculate_n
#t, amplitude = tds.TDSData._load_from_txt_file('F:/CloudStation/Uni/terapy2/testdata/2017-03-11T17-58-26.240799-GlassEmitterM1-Reference.txt')
#t_sample, amplitude_sample = tds.TDSData._load_from_txt_file('F:/CloudStation/Uni/terapy2/testdata/2017-03-11T18-02-11.465128-GlassEmitterM1-Cuvette1.txt')
#F:/CloudStation/Uni/terapy2/
#data = tds.TDSData.load_from_txt_file('testdata/2017-03-11T17-58-26.240799-GlassEmitterM1-Reference.txt')
#max_value=2e2, min_value=1e2)
#data = tds.TDSData.load_from_txt_file('testdata/2017-03-11T18-02-11.465128-GlassEmitterM1-Cuvette1.txt')
#data.set_frequency_resolution(0.5)
#data.plot_frequency_domain()
#data.set_frequency_boundaries(300e9, 4e12)
#data.remove_phase_offset()
#data.plot_frequency_domain()
#data.plot_time_domain()
#plt.plot(t, amplitude)
#plt.plot(t_sample, amplitude_sample)
#reference = TimeDomainData.load_from_txt_file('testdata/2017-03-11T17-58-26.240799-GlassEmitterM1-Reference.txt', time_factor=1e-12)
#sample = TimeDomainData.load_from_txt_file('testdata/2017-03-11T18-02-11.465128-GlassEmitterM1-Cuvette1.txt', time_factor=1e-12)
#reference = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_14_342_275.txt', time_factor=1e-12)
#sample = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_15_380_279.txt', time_factor=1e-12)
#reference = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_22_724_290.txt', time_factor=1e-12)
#sample = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_23_718_342.txt', time_factor=1e-12)
reference = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_30_462_337.txt', time_factor=1e-12, min_value=9e-12)
sample = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_31_430_335.txt', time_factor=1e-12, min_value=9e-12)
reference.apply_axis(16e-12, 28e-12, reference.time_step)
reference.plot()
#reference = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_9_705_202.txt', time_factor=1e-12, min_value=9e-12)
#sample = TimeDomainData.load_from_txt_file('C:/Users/lehrj/CloudStation/Uni/T-Age/Messungen/2017-08-04/13-11-44/TD/TD_Data_10_158_261.txt', time_factor=1e-12, min_value=9e-12)
#d = calculate_thickness(sample,
# reference,
# 1100e-6,
# 300e9, 1.25e12,
# 300e9, 1.25e12,
# 7,
# thickness_step=20e-6,
# thickness_interval=400e-6,
# do_plot=True, window_slope=17e-12)
#print(d)
#n = calculate_n(sample,
# reference,
# 1090e-6,
# 300e9, 1.25e12,
# 300e9, 1.25e12,
# do_plot=True)
#reference.remove_background(5e-12)
#reference.plot()
#reference.apply_axis(1.3e-10, 2.5e-10, 2.5e-14)
#reference.apply_peak_window(window_type='blackman', plot=True)
#reference.amplitude += 0.5 * np.max(reference.amplitude)
#reference.plot() | gpl-3.0 |
alvarofierroclavero/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
ThomasMiconi/htmresearch | projects/sequence_prediction/continuous_sequence/data/processSineWave.py | 13 | 2231 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import pandas as pd
import csv
import numpy as np
angles = np.linspace(0, 120, num=2400)
sine = np.sin(angles*4)
outputFile = open('sine.csv',"w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(['angle', 'data'])
csvWriter.writerow(['float', 'float'])
csvWriter.writerow(['', ''])
for i in range(2000):
csvWriter.writerow([angles[i], sine[i]])
outputFile.close()
outputFile = open('sine_cont.csv',"w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(['angle', 'data'])
csvWriter.writerow(['float', 'float'])
csvWriter.writerow(['', ''])
for i in range(2001, 2200):
csvWriter.writerow([angles[i], sine[i]])
outputFile.close()
# outputFile = open('data/sine_der.csv',"w")
# csvWriter = csv.writer(outputFile)
# csvWriter.writerow(['angle', 'data'])
# csvWriter.writerow(['float', 'float'])
# csvWriter.writerow(['', ''])
# for i in range(1,2000):
# csvWriter.writerow([angles[i], sine[i]-sine[i-1]])
# outputFile.close()
#
#
# outputFile = open('data/sine_der_cont.csv',"w")
# csvWriter = csv.writer(outputFile)
# csvWriter.writerow(['angle', 'data'])
# csvWriter.writerow(['float', 'float'])
# csvWriter.writerow(['', ''])
# for i in range(2001, 2200):
# csvWriter.writerow([angles[i], sine[i]-sine[i-1]])
# outputFile.close()
| agpl-3.0 |
sibis-platform/ncanda-datacore | scripts/import/webcnp/cnp.py | 2 | 3519 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
import os.path
import pandas
# Dictionary mapping instruments to their internal REDCap names (for "*_complete" fields)
instruments = { 'test_sessions' : 'test_sessions',
'cpf' : 'penn_facial_memory_test',
'cpfd' : 'penn_facial_memory_test_delayed_version',
'cpw' : 'penn_word_memory_test',
'cpwd' : 'penn_word_memory_test_delayed_version',
'medf36' : 'measured_emotion_differentiation_task_36_items',
'er40d' : 'emotion_recognition_task_form_d_40_items',
'mpract' : 'motor_praxis_test',
'pcet' : 'penn_conditional_exclusion_test_form_a',
'pmat24a' : 'penn_matrix_analysis_test_form_a_24_items',
'pvoc' : 'penn_vocabulary_test',
'pvrt' : 'penn_logical_reasoning_test_short_version',
'sfnb2' : 'short_fractalnback_2_back_version',
'shortvolt' : 'visual_object_learning_test_short_version',
'spcptnl' : 'short_penn_continuous_performance_task_number_letter_version',
'svdelay' : 'visual_object_learning_test_delayed_short_version' }
# Get a list of variables to copy as the intersections of source and target project field names
def get_copy_variables( project_src, project_dst ):
copy_vars = []
    form_dst_vars = [ field['field_name'][4:] for field in project_dst.metadata if (field['form_name'] == 'cnp_summary' ) ] ## get fields from the 'cnp_summary' form and strip the 'cnp_' prefix
for form in list(instruments.values()):
form_src_vars = [ field['field_name'] for field in project_src.metadata if (field['form_name'] == form) ]
copy_vars += list( set( form_src_vars ) & set( form_dst_vars ) )
return copy_vars
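# Sketch of the intended call (hedged: the API URL and tokens below are
# placeholders; project_src / project_dst are assumed to be PyCap
# redcap.Project handles exposing the .metadata list used above):
#
#   >>> import redcap
#   >>> project_src = redcap.Project('https://redcap.example.org/api/', SRC_TOKEN)
#   >>> project_dst = redcap.Project('https://redcap.example.org/api/', DST_TOKEN)
#   >>> copy_vars = get_copy_variables(project_src, project_dst)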
# Lookup tables for age-specific z-score
module_dir = os.path.dirname(os.path.abspath(__file__))
mean_sdev_byage_table = pandas.io.parsers.read_csv( os.path.join( module_dir, 'norm_means_stdev_byage.csv' ), header=0, index_col=[0] )
# This table maps fields in the summary form (keys) that need z-scores to fields in the imported lookup table that contain mean and standard deviation by age.
mean_sdev_by_field_dict = { 'cpf_ifac_tot' : 'cpf-a_cr',
'cpf_ifac_rtc' : 'cpf-a_rtcr',
'er40d_er40_cr' : 'k-er40-d_cr',
'er40d_er40_crt' : 'k-er40-d_rtcr',
'cpw_iwrd_tot' : 'k-cpw_cr',
'cpw_iwrd_rtc' : 'k-cpw_rtcr',
'medf36_medf36_a' : 'medf36-a_cr',
'medf36_medf36_t' : 'medf36-a_rtcr',
'mpract_mp2rtcr' : 'mpraxis_mp2rtcr',
'pmat24a_pmat24_a_cr' : 'pmat24-a_cr',
'pmat24a_pmat24_a_rtcr' : 'pmat24-a_rtcr',
'shortvolt_svt' : 'svolt_cr',
'shortvolt_svtcrt' : 'svolt_rtcr',
'pcet_pcetrtcr' : 'pcet_rtcr',
'pcet_pcet_acc2' : 'pcet_acc2',
'spcptnl_scpt_tp' : 'spcptnl_t_tp',
'spcptnl_scpt_tprt' : 'spcptnl_t_tprt' }
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter10/fig_fft_example.py | 3 | 3047 | """
Fast Fourier Transform Example
------------------------------
Figure 10.5
The discrete Fourier transform (bottom panel) for two noisy data sets shown in
the top panel. For 512 evenly sampled times t (dt = 0.977), points are drawn
from h(t) = a + sin(t)G(t), where G(t) is a Gaussian N(mu = 0,sigma = 10).
Gaussian noise with sigma = 0.05 (top data set) and 0.005 (bottom data set)
is added to signal h(t). The value of the offset a is 0.15 and 0, respectively.
The discrete Fourier transform is computed as described in Section 10.2.3.
For both noise realizations, the correct frequency f = (2pi)-1 ~ 0.159 is
easily discernible in the bottom panel. Note that the height of peaks is the
same for both noise realizations. The large value of abs(H(f = 0)) for data
with larger noise is due to the vertical offset.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.fftpack import fft
from scipy.stats import norm
from astroML.fourier import PSD_continuous
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Draw the data
np.random.seed(1)
tj = np.linspace(-25, 25, 512)
hj = np.sin(tj)
hj *= norm(0, 10).pdf(tj)
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.25)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
offsets = (0, 0.15)
colors = ('black', 'gray')
linewidths = (1, 2)
errors = (0.005, 0.05)
for (offset, color, error, linewidth) in zip(offsets, colors,
errors, linewidths):
# compute the PSD
err = np.random.normal(0, error, size=hj.shape)
hj_N = hj + err + offset
fk, PSD = PSD_continuous(tj, hj_N)
# plot the data and PSD
ax1.scatter(tj, hj_N, s=4, c=color, lw=0)
ax1.plot(tj, 0 * tj + offset, '-', c=color, lw=1)
ax2.plot(fk, PSD, '-', c=color, lw=linewidth)
# vertical line marking the expected peak location
ax2.plot([0.5 / np.pi, 0.5 / np.pi], [-0.1, 1], ':k', lw=1)
ax1.set_xlim(-25, 25)
ax1.set_ylim(-0.1, 0.3001)
ax1.set_xlabel('$t$')
ax1.set_ylabel('$h(t)$')
ax1.yaxis.set_major_locator(plt.MultipleLocator(0.1))
ax2.set_xlim(0, 0.8)
ax2.set_ylim(-0.101, 0.801)
ax2.set_xlabel('$f$')
ax2.set_ylabel('$PSD(f)$')
plt.show()
| bsd-2-clause |
wolfiex/DSMACC-testing | dsmacc/graph/graph.py | 1 | 3368 | '''
graph generation library for use with networkx
'''
import networkx as nx
import numpy as np
import pandas as pd
def normalise(x):
    x = x[:]  # work on a copy so the caller's array is not modified in place
x -= min(x)
x /= max(x)
return x
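# Illustrative behaviour of normalise (values made up): the input is shifted
# and scaled onto the interval [0, 1]:
#
#   >>> normalise(np.array([2., 4., 6.]))   # -> array with values [0.0, 0.5, 1.0]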
def jgraph(posjac):
'''
networkx graph object from posjac at timestep
'''
posjac = 1 - normalise(np.log10(posjac).replace([np.inf,-np.inf],np.nan).dropna())
split = [i.split('->') for i in posjac.index]
#graph
G = nx.DiGraph()
for e in range(len(split)):
G.add_edge(split[e][0],split[e][1],weight=posjac[e])
G.remove_edges_from(G.selfloop_edges())
return G
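# Illustrative sketch (species names and values are hypothetical): given a
# pandas Series indexed by 'SOURCE->TARGET' strings, jgraph returns a weighted
# DiGraph with self-loops removed:
#
#   >>> import pandas as pd
#   >>> posjac = pd.Series({'NO2->NO': 1e-3, 'NO->NO2': 1e-1, 'NO->NO': 1.0})
#   >>> G = jgraph(posjac)
#   >>> sorted(G.edges())   # -> [('NO', 'NO2'), ('NO2', 'NO')]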
def rm_nodes (G,ignore):
for i in ignore:
try:G.remove_node(i)
except:None
return G
def getnx(self, ts ,save=False,ignore=[] ):
'''
Create a networkx graph from a DSMACC new class
Usage:
getnx(a,a.ts[-1], 'propane')
'''
try: self.posjac
except:self.create_posjac()
G = nx.DiGraph()
posjac = self.posjac.loc[ts,:]
split = [i.split('->') for i in posjac.index]
p = [i for i in posjac if i != 0 ]
mn = np.min(p)
mx = np.log10(np.max(p) - mn )
mn = np.log10(mn)
for e in range(len(split)):
if posjac[e] > 0 :
G.add_edge(split[e][0],split[e][1],weight=1e-4+(np.log10(posjac[e])-mn)/mx )
G.remove_edges_from(G.selfloop_edges())
#no more zero concentration edges
G = rm_nodes (G, set(G.nodes()) - set(self.spec.columns))
G = rm_nodes (G, ignore)
#rm isolates
G = rm_nodes(G,list(nx.isolates(G)))
if save:
nx.write_weighted_edgelist(G, save+'.wedgelist')
#G=nx.read_weighted_edgelist('propane.wedgelist',create_using=nx.DiGraph)
return G
def pagerank(a):
return geobj2df(metric(tograph(group_hour(a.jacsp))))
def tograph(jac):
'''
Use hourly avg
'''
rt = []
for t in jac.iterrows():
jacsp=t[1]
#inverse negative links
index = np.array(jacsp.index)
lt = list(jacsp<0)
index[lt] = map(lambda x: '->'.join(reversed(x.split('->'))),index[lt])
jacsp.index = index
jacsp = jacsp.abs()
#normalize jacsp
jacsp = jacsp*1.01 - jacsp.min().min()
jacsp /= jacsp.max().max()
split = [i.split('->') for i in jacsp.index]
#graph
G = nx.DiGraph()
for e in range(len(split)):
G.add_edge(split[e][0],split[e][1],weight=jacsp[e])
G.remove_edges_from(G.selfloop_edges())
rt.append({'graph':G,'time':t[0]})
return rt
def metric(GS,met = 'nx.pagerank'):
'''
    GS - output list from tograph
'''
metfn = eval(met)
for gt in range(len(GS)):
res = metfn(GS[gt]['graph'])
        res = [[key, value] for key, value in sorted(res.iteritems(), key=lambda kv: (kv[1], kv[0]))]
GS[gt][met] = res
return GS
def geobj2df(GS,what = 'nx.pagerank'):
res = []
index = []
for s in GS:
index.append(s['time'])
s = pd.DataFrame(s[what])
s.index = s[0]
s=s[1]
res.append(s)
df = pd.concat(res,axis = 1).T
df.index = index
    df = (df * 1.1).subtract(df.min(axis=0))
    df = df.divide(df.max(axis=1), axis=0)
import zcreate_centrality as p
#p.createhtml(df)
return df
| gpl-3.0 |
rseubert/scikit-learn | sklearn/cross_validation.py | 7 | 65132 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['Bootstrap',
'KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n, indices=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
self._indices = indices
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
indices = self._indices
if indices:
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
if indices:
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p, indices=None):
super(LeavePOut, self).__init__(n, indices)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, indices, shuffle, random_state):
super(_BaseKFold, self).__init__(n, indices)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, indices, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one contains
    the remainder.
"""
def __init__(self, y, n_folds=3, indices=None, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, indices, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels, indices=None):
super(LeaveOneLabelOut, self).__init__(len(labels), indices)
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p, indices=None):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels), indices)
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class Bootstrap(object):
"""Random sampling with replacement cross-validation iterator
Provides train/test indices to split data in train test sets
while resampling the input n_iter times: each time a new
random split of the data is performed and then samples are drawn
(with replacement) on each side of the split to build the training
and test sets.
Note: contrary to other cross-validation strategies, bootstrapping
will allow some samples to occur several times in each splits. However
a sample that occurs in the train split will never occur in the test
split and vice-versa.
If you want each sample to occur at most once you should probably
use ShuffleSplit cross validation instead.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default is 3)
Number of bootstrapping iterations
train_size : int or float (default is 0.5)
If int, number of samples to include in the training split
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split.
test_size : int or float or None (default is None)
        If int, number of samples to include in the test set
(should be smaller than the total number of samples passed
in the dataset).
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split.
If None, n_test is set as the complement of n_train.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> bs = cross_validation.Bootstrap(9, random_state=0)
>>> len(bs)
3
>>> print(bs)
Bootstrap(9, n_iter=3, train_size=5, test_size=4, random_state=0)
>>> for train_index, test_index in bs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [1 8 7 7 8] TEST: [0 3 0 5]
TRAIN: [5 4 2 4 2] TEST: [6 7 1 0]
TRAIN: [4 7 0 1 1] TEST: [5 3 6 5]
See also
--------
ShuffleSplit: cross validation using random permutations.
"""
# Static marker to be able to introspect the CV type
indices = True
def __init__(self, n, n_iter=3, train_size=.5, test_size=None,
random_state=None):
# See, e.g., http://youtu.be/BzHz0J9a6k0?t=9m38s for a motivation
# behind this deprecation
warnings.warn("Bootstrap will no longer be supported as a " +
"cross-validation method as of version 0.15 and " +
"will be removed in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if isinstance(train_size, numbers.Integral):
self.train_size = train_size
elif (isinstance(train_size, numbers.Real) and train_size >= 0.0
and train_size <= 1.0):
self.train_size = int(ceil(train_size * n))
else:
raise ValueError("Invalid value for train_size: %r" %
train_size)
if self.train_size > n:
raise ValueError("train_size=%d should not be larger than n=%d" %
(self.train_size, n))
if isinstance(test_size, numbers.Integral):
self.test_size = test_size
elif isinstance(test_size, numbers.Real) and 0.0 <= test_size <= 1.0:
self.test_size = int(ceil(test_size * n))
elif test_size is None:
self.test_size = self.n - self.train_size
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if self.test_size > n - self.train_size:
raise ValueError(("test_size + train_size=%d, should not be " +
"larger than n=%d") %
(self.test_size + self.train_size, n))
self.random_state = random_state
def __iter__(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_train = permutation[:self.train_size]
ind_test = permutation[self.train_size:self.train_size
+ self.test_size]
# bootstrap in each split individually
train = rng.randint(0, self.train_size,
size=(self.train_size,))
test = rng.randint(0, self.test_size,
size=(self.test_size,))
yield ind_train[train], ind_test[test]
def __repr__(self):
return ('%s(%d, n_iter=%d, train_size=%d, test_size=%d, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
self.train_size,
self.test_size,
self.random_state,
))
def __len__(self):
return self.n_iter
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
if indices is None:
indices = True
else:
warnings.warn("The indices parameter is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning)
self.n = n
self.n_iter = n_iter
if n_iterations is not None: # pragma: no cover
warnings.warn("n_iterations was renamed to n_iter for consistency "
" and will be removed in 0.16.")
self.n_iter = n_iterations
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._indices = indices
self.n_train, self.n_test = _validate_shuffle_split(n,
test_size,
train_size)
@property
def indices(self):
warnings.warn("The indices attribute is deprecated and will be "
"removed (assumed True) in 0.17", DeprecationWarning,
stacklevel=1)
return self._indices
def __iter__(self):
if self._indices:
for train, test in self._iter_indices():
yield train, test
return
for train, test in self._iter_indices():
train_m = np.zeros(self.n, dtype=bool)
test_m = np.zeros(self.n, dtype=bool)
train_m[train] = True
test_m[test] = True
yield train_m, test_m
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
See also
--------
Bootstrap: cross-validation using re-sampling with replacement.
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
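# Behaviour sketch for _validate_shuffle_split (added for illustration):
#
#   >>> _validate_shuffle_split(10, test_size=0.25, train_size=None)   # -> (7, 3)
#   >>> _validate_shuffle_split(10, test_size=None, train_size=6)      # -> (6, 4)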
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
indices=None, random_state=None, n_iterations=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, indices, random_state,
n_iterations)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold, indices=None):
super(PredefinedSplit, self).__init__(len(test_fold), indices)
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
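# Illustrative usage sketch (not part of the original module). The iris data
# and LinearSVC are assumptions chosen only to show the call signature:
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.svm import LinearSVC
# >>> iris = load_iris()
# >>> preds = cross_val_predict(LinearSVC(), iris.data, iris.target, cv=5)
# >>> preds.shape
# (150,)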
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
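# Illustrative behaviour of the helper above (not part of the original source):
# >>> _check_is_partition(np.array([2, 0, 1]), 3)
# True
# >>> _check_is_partition(np.array([0, 0, 1]), 3)  # index 2 is never hit
# False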
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
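# Illustrative usage sketch (not part of the original module). The diabetes
# data and Lasso are assumptions used only to demonstrate the call:
# >>> from sklearn.datasets import load_diabetes
# >>> from sklearn.linear_model import Lasso
# >>> diabetes = load_diabetes()
# >>> scores = cross_val_score(Lasso(), diabetes.data, diabetes.target, cv=3)
# >>> scores.shape
# (3,)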
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier, warn_mask=True)
def _check_cv(cv, X=None, y=None, classifier=False, warn_mask=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
needs_indices = is_sparse or not hasattr(X, "shape")
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if warn_mask and not needs_indices:
warnings.warn('check_cv will return indices instead of boolean '
'masks from 0.17', DeprecationWarning)
else:
needs_indices = None
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv, indices=needs_indices)
else:
cv = KFold(_num_samples(y), cv, indices=needs_indices)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv, indices=needs_indices)
if needs_indices and not getattr(cv, "_indices", True):
raise ValueError("Sparse data and lists require indices-based cross"
" validation generator, got: %r", cv)
return cv
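# Illustrative behaviour of the public ``check_cv`` wrapper above (not part of
# the original source): with an integer ``cv`` and a classification target, a
# stratified splitter is built (this version may also emit a DeprecationWarning
# about boolean masks):
# >>> y = np.array([0, 0, 1, 1, 0, 1])
# >>> cv = check_cv(3, X=np.zeros((6, 2)), y=y, classifier=True)
# >>> isinstance(cv, StratifiedKFold)
# True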
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
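# Illustrative usage sketch (not part of the original module). The iris data
# and a linear SVC are assumptions used only for demonstration:
# >>> from sklearn.datasets import load_iris
# >>> from sklearn.svm import SVC
# >>> iris = load_iris()
# >>> score, perm_scores, pvalue = permutation_test_score(
# ...     SVC(kernel='linear'), iris.data, iris.target,
# ...     cv=5, n_permutations=30, random_state=0)
# >>> perm_scores.shape
# (30,)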
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
vbtdung/DCTCP-assignment | dctcp-assignment/util/plot_queue.py | 2 | 4530 | '''
Plot queue occupancy over time
'''
from helper import *
import plot_defaults
plot_defaults.quarter_size()
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f',
help="Queue timeseries output to one plot",
required=True,
action="store",
nargs='+',
dest="files")
parser.add_argument('--maxy',
help="Max mbps on y-axis..",
type=int,
default=1000,
action="store",
dest="maxy")
parser.add_argument('--miny',
help="Min mbps on y-axis..",
type=int,
default=0,
action="store",
dest="miny")
parser.add_argument('--legend', '-l',
help="Legend to use if there are multiple plots. File names used as default.",
action="store",
nargs="+",
default=None,
dest="legend")
parser.add_argument('--out', '-o',
help="Output png file for the plot.",
default=None, # Will show the plot
dest="out")
parser.add_argument('-s', '--summarise',
help="Summarise the time series plot (boxplot). First 10 and last 10 values are ignored.",
default=False,
dest="summarise",
action="store_true")
parser.add_argument('--cdf',
help="Plot CDF of queue timeseries (first 10 and last 10 values are ignored)",
default=False,
dest="cdf",
action="store_true")
parser.add_argument('--labels',
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=[],
nargs="+",
dest="labels")
parser.add_argument('--every',
help="If the plot has a lot of data points, plot one every EVERY (x,y) point (default 1).",
default=1,
type=int)
args = parser.parse_args()
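# Example invocation (illustrative only; the queue-length file name and output
# name are assumptions, not files shipped with this script):
#   python plot_queue.py -f qlen_s1-eth1.txt --maxy 600 -o queue.png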
if not args.labels:
    args.labels = args.files
if args.legend is None:
args.legend = []
for file in args.files:
args.legend.append(file)
to_plot=[]
def get_style(i):
if i == 0:
return {'color': 'red'}
else:
return {'color': 'black', 'ls': '-.'}
print args.files
fig = figure()
ax = fig.add_subplot(111)
for i, f in enumerate(args.files):
data = read_list(f)
xaxis = map(float, col(0, data))
start_time = xaxis[0]
xaxis = map(lambda x: x - start_time, xaxis)
qlens = map(float, col(1, data))
if args.summarise or args.cdf:
to_plot.append(qlens[10:-10])
else:
xaxis = xaxis[::args.every]
qlens = qlens[::args.every]
ax.plot(xaxis, qlens, label=args.legend[i], lw=2, **get_style(i))
ax.xaxis.set_major_locator(MaxNLocator(4))
#plt.title("Queue sizes")
plt.title("")
plt.ylabel("Packets")
plt.grid(True)
#yaxis = range(0, 1101, 50)
#ylabels = map(lambda y: str(y) if y%100==0 else '', yaxis)
#plt.yticks(yaxis, ylabels)
#plt.ylim((0,1100))
plt.ylim((args.miny,args.maxy))
if args.summarise:
plt.xlabel("Link Rates")
plt.boxplot(to_plot)
xaxis = range(1, 1+len(args.files))
plt.xticks(xaxis, args.labels)
for x in xaxis:
y = pc99(to_plot[x-1])
print x, y
if x == 1:
s = '99pc: %d' % y
offset = (-20,20)
else:
s = str(y)
offset = (-10, 20)
plt.annotate(s, (x,y+1), xycoords='data',
xytext=offset, textcoords='offset points',
arrowprops=dict(arrowstyle="->"))
elif args.cdf:
fig = figure()
ax = fig.add_subplot(111)
for i,data in enumerate(to_plot):
xs, ys = cdf(map(int, data))
ax.plot(xs, ys, label=args.legend[i], lw=2, **get_style(i))
plt.ylabel("Fraction")
plt.xlabel("Packets")
plt.ylim((0, 1.0))
plt.legend(args.legend, loc="upper left")
plt.title("")
ax.xaxis.set_major_locator(MaxNLocator(4))
else:
plt.xlabel("Seconds")
if args.legend:
plt.legend(args.legend, loc="upper left")
else:
plt.legend(args.files)
if args.out:
plt.savefig(args.out)
else:
plt.show()
| unlicense |
simvisage/oricreate | docs/conf.py | 1 | 8690 | # -*- coding: utf-8 -*-
#
# oricreate documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 27 11:03:20 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root_path_simvisage = os.path.abspath('../../../')
sys.path.insert(0, root_path_simvisage)
print('PATH', sys.path)
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'matplotlib.sphinxext.mathmpl',
# 'matplotlib.sphinxext.only_directives',
# 'matplotlib.sphinxext.plot_directive',
# 'sphinx.ext.inheritance_diagram',
# 'sphinx.ext.graphviz',
# 'sphinx.ext.todo',
# 'sphinx.ext.coverage',
# 'sphinx.ext.pngmath',
# 'sphinx.ext.ifconfig',
# 'sphinx.ext.viewcode',
# 'sphinxcontrib.programoutput'
]
[extensions]
autodoc_member_order = 'bysource'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['docs', '_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf - 8 - sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'oricreate'
copyright = '2020, IMB'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.1'
# The full version, including alpha/beta/rc tags.
release = '0.9.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'oricreatedoc'
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'oricreate.tex', 'oricreate Documentation',
'Rostislav Chudoba', 'manual', True),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
pngmath_latex_preamble = r'''
\newcommand{\inl}[2]{\mbox{$\displaystyle\int_{#1}^{#2}$}}
\newcommand{\sul}[2]{\mbox{$\displaystyle\sum_{#1}^{#2}\,$}}
\newcommand{\del}[2]{\displaystyle\frac{#1}{#2}}
\newcommand{\pard}[2]{\displaystyle\frac{\partial #1}{\partial #2}}
\newcommand{\norm}[1]{\left\| \bm{#1} \right\|}
\newcommand{\dotp}[2]{\bm{#1} \cdot \bm{#2}}
'''
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\newcommand{\inl}[2]{\mbox{$\displaystyle\int_{#1}^{#2}$}}
\newcommand{\sul}[2]{\mbox{$\displaystyle\sum_{#1}^{#2}\,$}}
\newcommand{\bm}[1]{\mbox{\boldmath$#1$}}
\newcommand{\del}[2]{\mbox{$\displaystyle\frac{#1}{#2}$}}
'''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
'classoptions': ',openany',
'babel': '\\usepackage[english]{babel}',
'preamble': latex_preamble
}
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'oricreate', 'oricreate Documentation',
['Rostislav Chudoba'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
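# Illustrative build command for this configuration (assumes Sphinx is
# installed and that this conf.py lives in the ``docs`` directory):
#   sphinx-build -b html docs docs/_build/html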
| gpl-3.0 |
nmayorov/scipy | scipy/spatial/_geometric_slerp.py | 20 | 7668 | from __future__ import division, print_function, absolute_import
__all__ = ['geometric_slerp']
import warnings
import numpy as np
from scipy.spatial.distance import euclidean
def _geometric_slerp(start, end, t):
# create an orthogonal basis using QR decomposition
basis = np.vstack([start, end])
Q, R = np.linalg.qr(basis.T)
signs = 2 * (np.diag(R) >= 0) - 1
Q = Q.T * signs.T[:, np.newaxis]
R = R.T * signs.T[:, np.newaxis]
# calculate the angle between `start` and `end`
c = np.dot(start, end)
s = np.linalg.det(R)
omega = np.arctan2(s, c)
# interpolate
start, end = Q
s = np.sin(t * omega)
c = np.cos(t * omega)
return start * c[:, np.newaxis] + end * s[:, np.newaxis]
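# Illustrative check of the helper above (not part of the original source):
# halfway between two orthogonal unit vectors lies at 45 degrees on the arc.
# >>> _geometric_slerp(np.array([1., 0.]), np.array([0., 1.]), np.array([0.5]))
# array([[0.70710678, 0.70710678]])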
def geometric_slerp(start,
end,
t,
tol=1e-7):
"""
Geometric spherical linear interpolation.
The interpolation occurs along a unit-radius
great circle arc in arbitrary dimensional space.
Parameters
----------
start : (n_dimensions, ) array-like
Single n-dimensional input coordinate in a 1-D array-like
object. `n` must be greater than 1.
end : (n_dimensions, ) array-like
Single n-dimensional input coordinate in a 1-D array-like
object. `n` must be greater than 1.
    t : float or (n_points,) array-like
A float or array-like of doubles representing interpolation
parameters, with values required in the inclusive interval
between 0 and 1. A common approach is to generate the array
with ``np.linspace(0, 1, n_pts)`` for linearly spaced points.
Ascending, descending, and scrambled orders are permitted.
    tol : float
The absolute tolerance for determining if the start and end
coordinates are antipodes.
Returns
-------
result : (t.size, D)
An array of doubles containing the interpolated
spherical path and including start and
end when 0 and 1 t are used. The
interpolated values should correspond to the
same sort order provided in the t array. The result
may be 1-dimensional if ``t`` is a float.
Raises
------
ValueError
If ``start`` and ``end`` are antipodes, not on the
unit n-sphere, or for a variety of degenerate conditions.
Notes
-----
The implementation is based on the mathematical formula provided in [1]_,
and the first known presentation of this algorithm, derived from study of
4-D geometry, is credited to Glenn Davis in a footnote of the original
quaternion Slerp publication by Ken Shoemake [2]_.
.. versionadded:: 1.5.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Slerp#Geometric_Slerp
.. [2] Ken Shoemake (1985) Animating rotation with quaternion curves.
ACM SIGGRAPH Computer Graphics, 19(3): 245-254.
See Also
--------
scipy.spatial.transform.Slerp : 3-D Slerp that works with quaternions
Examples
--------
Interpolate four linearly-spaced values on the circumference of
a circle spanning 90 degrees:
>>> from scipy.spatial import geometric_slerp
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> start = np.array([1, 0])
>>> end = np.array([0, 1])
>>> t_vals = np.linspace(0, 1, 4)
>>> result = geometric_slerp(start,
... end,
... t_vals)
The interpolated results should be at 30 degree intervals
recognizable on the unit circle:
>>> ax.scatter(result[...,0], result[...,1], c='k')
>>> circle = plt.Circle((0, 0), 1, color='grey')
>>> ax.add_artist(circle)
>>> ax.set_aspect('equal')
>>> plt.show()
Attempting to interpolate between antipodes on a circle is
ambiguous because there are two possible paths, and on a
sphere there are infinite possible paths on the geodesic surface.
Nonetheless, one of the ambiguous paths is returned along
with a warning:
>>> opposite_pole = np.array([-1, 0])
>>> with np.testing.suppress_warnings() as sup:
... sup.filter(UserWarning)
... geometric_slerp(start,
... opposite_pole,
... t_vals)
array([[ 1.00000000e+00, 0.00000000e+00],
[ 5.00000000e-01, 8.66025404e-01],
[-5.00000000e-01, 8.66025404e-01],
[-1.00000000e+00, 1.22464680e-16]])
Extend the original example to a sphere and plot interpolation
points in 3D:
>>> from mpl_toolkits.mplot3d import proj3d
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
Plot the unit sphere for reference (optional):
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
Interpolating over a larger number of points
may provide the appearance of a smooth curve on
the surface of the sphere, which is also useful
for discretized integration calculations on a
sphere surface:
>>> start = np.array([1, 0, 0])
>>> end = np.array([0, 0, 1])
>>> t_vals = np.linspace(0, 1, 200)
>>> result = geometric_slerp(start,
... end,
... t_vals)
>>> ax.plot(result[...,0],
... result[...,1],
... result[...,2],
... c='k')
>>> plt.show()
"""
start = np.asarray(start, dtype=np.float64)
end = np.asarray(end, dtype=np.float64)
if start.ndim != 1 or end.ndim != 1:
raise ValueError("Start and end coordinates "
"must be one-dimensional")
if start.size != end.size:
raise ValueError("The dimensions of start and "
"end must match (have same size)")
if start.size < 2 or end.size < 2:
raise ValueError("The start and end coordinates must "
"both be in at least two-dimensional "
"space")
if np.array_equal(start, end):
return [start] * np.asarray(t).size
# for points that violate equation for n-sphere
for coord in [start, end]:
if not np.allclose(np.linalg.norm(coord), 1.0,
rtol=1e-9,
atol=0):
raise ValueError("start and end are not"
" on a unit n-sphere")
if not isinstance(tol, float):
raise ValueError("tol must be a float")
else:
tol = np.fabs(tol)
coord_dist = euclidean(start, end)
# diameter of 2 within tolerance means antipodes, which is a problem
# for all unit n-spheres (even the 0-sphere would have an ambiguous path)
if np.allclose(coord_dist, 2.0, rtol=0, atol=tol):
warnings.warn("start and end are antipodes"
" using the specified tolerance;"
" this may cause ambiguous slerp paths")
t = np.asarray(t, dtype=np.float64)
if t.size == 0:
return np.empty((0, start.size))
if t.min() < 0 or t.max() > 1:
raise ValueError("interpolation parameter must be in [0, 1]")
if t.ndim == 0:
return _geometric_slerp(start,
end,
np.atleast_1d(t)).ravel()
else:
return _geometric_slerp(start,
end,
t)
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
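# Illustrative check (not part of the original tests): with p=2 this reduces
# to the Euclidean distance.
# >>> dist_func(np.array([0., 0.]), np.array([3., 4.]), 2)
# 5.0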
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 50 | 13330 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
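# Illustrative shape of the fixture above (not part of the original tests):
# three topics, each owning three words, give a 9x9 block-diagonal matrix.
# >>> n_topics, X = _build_sparse_mtx()
# >>> n_topics, X.shape
# (3, (9, 9))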
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
huongttlan/statsmodels | statsmodels/examples/ex_kernel_regression3.py | 34 | 2380 | # -*- coding: utf-8 -*-
"""script to try out Censored kernel regression
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
x2 = x**2 + 0.02 * np.random.normal(size=nobs)
y_true = np.sin(x*5)/x + 2*x - 3 * x2
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
cens_side = ['left', 'right', 'random'][2]
if cens_side == 'left':
c_val = 0.5
y_cens = np.clip(y, c_val, 100)
elif cens_side == 'right':
c_val = 3.5
y_cens = np.clip(y, -100, c_val)
elif cens_side == 'random':
c_val = 3.5 + 3 * np.random.randn(nobs)
y_cens = np.minimum(y, c_val)
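    # Illustrative effect of the censoring above (comment added, not from the
    # original script): left-censoring clips values below the threshold, e.g.
    # np.clip([-1.0, 0.7, 4.2], 0.5, 100) -> array([ 0.5,  0.7,  4.2])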
model = nparam.KernelCensoredReg(endog=[y_cens],
#exog=[np.column_stack((x, x**2))], reg_type='lc',
exog=[x, x2], reg_type='ll',
var_type='cc', bw='aic', #'cv_ls', #[0.23, 434697.22], #'cv_ls',
censor_val=c_val[:,None],
#defaults=nparam.EstimatorSettings(efficient=True)
)
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
# model1 = nparam.KernelReg(endog=[y],
# exog=[x], reg_type='lc',
# var_type='c', bw='cv_ls')
# mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y_cens],
exog=[x, x2], reg_type='ll',
var_type='cc', bw='aic',# 'cv_ls'
)
mean2, mfx2 = model2.fit()
print(model.bw)
#print model1.bw
print(model2.bw)
ix = np.argsort(y_cens)
ix_rev = np.zeros(nobs, int)
ix_rev[ix] = np.arange(nobs)
ix_rev = model.sortix_rev
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_cens, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(x, sm_mean[ix_rev], lw=2, label='model 0 mean')
ax.plot(x, mean2, lw=2, label='model 2 mean')
ax.legend()
plt.show()
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
sserrot/champion_relationships | venv/share/doc/networkx-2.4/examples/drawing/plot_atlas.py | 1 | 2796 | #!/usr/bin/env python
"""
=====
Atlas
=====
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import random
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot.")
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
from networkx.generators.atlas import graph_atlas_g
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n) == 0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# iterator of graphs of all connected components
C = (U.subgraph(c) for c in nx.connected_components(U))
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
if __name__ == '__main__':
G = atlas6()
print("graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = (G.subgraph(c) for c in nx.connected_components(G))
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.show()
| mit |
jkthompson/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, matlab style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
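# A minimal usage sketch of the numbering behaviour described in the docstring
# above (hypothetical helper, not part of the pyplot API): figure(1) creates
# figure 1 on the first call and simply re-activates it on later calls.
def _example_figure_numbering():
    fig1 = figure(1, figsize=(4, 3))   # create (or activate) figure number 1
    figure(2)                          # create a second figure
    assert figure(1) is fig1           # figure(1) returns the same Figure again
    return fig1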
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
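# A short sketch of the closing variants listed in the docstring above
# (hypothetical helper, not part of the pyplot API).
def _example_close_ways():
    figure(5)
    close(5)              # close by figure number
    fig = figure(6)
    close(fig)            # close by Figure instance
    figure(7)
    figure(8)
    close('all')          # close every remaining figure window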
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
    # b=None toggles the hold state, so let's get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
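# A tiny sketch of over(): overlay a second curve without permanently changing
# the hold state (hypothetical helper, not part of the pyplot API).
def _example_over_usage():
    plot([0, 1, 2])                   # first curve, uses the current hold state
    over(plot, [2, 1, 0], 'r--')      # overlay a second curve; hold state is restored
    return ishold()                   # unchanged by the over() call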
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
kwarg Accepts Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
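# A minimal sketch of the rect form documented above: place a main axes and a
# small inset using [left, bottom, width, height] in normalized figure
# coordinates (hypothetical helper, not part of the pyplot API).
def _example_axes_rect():
    main_ax = axes([0.1, 0.1, 0.8, 0.8])                  # nearly full-figure axes
    inset_ax = axes([0.6, 0.6, 0.25, 0.25], axisbg='w')   # small inset, white background
    return main_ax, inset_ax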
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
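# A minimal sketch of the compact three-digit subplot form documented above
# (hypothetical helper, not part of the pyplot API).
def _example_subplot_grid():
    subplot(211)                      # 2 rows, 1 column, upper axes
    plot([1, 2, 3])
    subplot(212, axisbg='y')          # lower axes with a yellow background
    plot([3, 2, 1])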
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in matlab.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
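# A minimal sketch of querying and setting limits with axis(), including the
# 'equal' mode described above (hypothetical helper, not part of the pyplot API).
def _example_axis_usage():
    plot([0, 1, 2], [0, 1, 4])
    limits = axis()                   # [xmin, xmax, ymin, ymax] currently in use
    axis([0, 2, 0, 5])                # set all four limits at once
    axis('equal')                     # equal increments of x and y get equal length
    return limits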
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
Defaults override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
'rotation' : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
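# A minimal sketch of adjusting one end of a limit at a time with the keyword
# forms documented for xlim()/ylim() above (hypothetical helper, not part of
# the pyplot API).
def _example_limit_kwargs():
    plot([1, 2, 3])
    xlim(xmin=0)      # move only the lower x limit
    ylim(ymax=5)      # move only the upper y limit
    return xlim(), ylim()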
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
"""
Set/Get the locations and labels of the x-axis ticks::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Set/Get the locations and labels of the y-axis ticks::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
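# A minimal sketch of reading back and overriding tick locations and labels
# with xticks()/yticks(), as documented above (hypothetical helper, not part
# of the pyplot API).
def _example_tick_usage():
    plot([1, 2, 3, 4, 5])
    locs, labels = xticks()                             # current locations and Text labels
    xticks([0, 1, 2, 3, 4], ('a', 'b', 'c', 'd', 'e'))  # explicit locations and labels
    yticks([1, 3, 5])                                   # locations only; labels default
    return locs, labels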
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each angle.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands(): return ( 'axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh', 'plot', 'psd',
'savefig', 'scatter', 'set', 'semilogx', 'semilogy', 'show',
'specgram', 'stem', 'subplot', 'table', 'text', 'title', 'xlabel',
'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red',
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background::
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
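# A minimal sketch of the color formats listed above (single-letter alias,
# hex string, RGB tuple, html color name) used interchangeably; the helper
# name is hypothetical and not part of the pyplot API.
def _example_color_formats():
    plot([1, 2, 3], color='r')                   # single-letter alias
    plot([2, 3, 4], color='#eeefff')             # html hex string
    plot([3, 4, 5], color=(0.1, 0.2, 0.5))       # R,G,B tuple in [0, 1]
    plot([4, 5, 6], color='burlywood')           # legal html color name
    title('Is this the best color?', color='#afeeee')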
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to match
that of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
if fignum is False or fignum is 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
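# A minimal sketch of matshow() on a small random matrix; a new, appropriately
# shaped figure window is created automatically unless *fignum* is given
# (hypothetical helper and data, not part of the pyplot API).
def _example_matshow_usage():
    import numpy as np
    A = np.random.rand(8, 12)     # hypothetical data, wider than tall
    im = matshow(A)               # origin at the upper left, x ticks on top
    return im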
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, eg., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
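# A minimal sketch of feeding plotfile() a tiny comma-separated file; the file
# name and column headers are made up for the example (hypothetical helper,
# not part of the pyplot API).
def _example_plotfile_usage(fname='example_data.csv'):
    with open(fname, 'w') as fh:
        fh.write('x,volume,adj_close\n1,10,2.0\n2,30,2.5\n3,20,2.2\n')
    # plot the 2nd and 3rd columns against the 1st, semilogy for the volume column
    plotfile(fname, ('x', 'volume', 'adj_close'),
             plotfuncs={'volume': 'semilogy'})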
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| gpl-3.0 |
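# Illustrative sketch (not matplotlib code): every wrapper above repeats one
# save/override/call/restore pattern around pyplot's global hold state, with an
# extra "set current image" step (gci._current = ret) for image-producing
# commands.  The hypothetical factory below shows that pattern in isolation;
# matplotlib instead generates the source with boilerplate.py so each command
# keeps its own signature and docstring.
def _make_hold_wrapper(method_name, get_axes, ishold, hold, draw_if_interactive):
    def wrapper(*args, **kwargs):
        previous = ishold()                  # remember the current hold state
        override = kwargs.pop('hold', None)  # allow hold=True|False per call
        if override is not None:
            hold(override)
        try:
            ret = getattr(get_axes(), method_name)(*args, **kwargs)
            draw_if_interactive()
        finally:
            hold(previous)                   # restore state on success or error
        return ret
    wrapper.__name__ = method_name
    return wrapper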
tomlof/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 46 | 3387 | import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.testing import assert_equal, assert_array_equal
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3),
)
assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3),
)
| bsd-3-clause |
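# A minimal usage sketch of the API exercised by the tests above; it assumes a
# scikit-learn version of the same era, where FunctionTransformer accepts
# func, inverse_func and inv_kw_args (exactly the keywords the tests use).
import numpy as np
from sklearn.preprocessing import FunctionTransformer

X = np.array([[1., 4.], [9., 16.]])
ft = FunctionTransformer(func=np.sqrt,
                         inverse_func=np.around, inv_kw_args=dict(decimals=3))
Xt = ft.transform(X)               # element-wise square root
X_back = ft.inverse_transform(Xt)  # np.sqrt(X) rounded to 3 decimals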
dkuner/example-modules | modules/modeling/basic/linear_svc_estimator/main.py | 2 | 1630 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from specparser import get_settings_from_file
from pprint import pprint
import csv
from sklearn.svm import LinearSVC
import numpy as np
from sklearn.externals import joblib
import matplotlib
matplotlib.use('Agg')
import datetime
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def drawPrecisionRecall(X,Y,output_file):
pdf = PdfPages(output_file)
plt.figure(figsize=(len(Y), len(X)))
plt.plot(Y, X, 'r-o')
plt.title('Precision/Recall')
pdf.savefig() # saves the current figure into a pdf page
plt.close()
pdf.close()
def readcolumn(filename):
column = []
with open(filename,"r") as fconcl:
for line in fconcl:
column.append(line.rstrip('\n'))
return column
def main():
settings = get_settings_from_file("spec.json")
print(settings)
X = np.genfromtxt(settings.Input.X, delimiter=',', skip_header=1)
svc = joblib.load(settings.Input.MODEL)
Y_out = svc.predict(X)
Y_list = [Y_out]
np.savetxt("./conclusion.csv", Y_out, fmt="%d", delimiter=",")
conclusion = readcolumn("./conclusion.csv")
label = readcolumn(settings.Input.Y)
precision_list = []
recall_list = []
hits = 0
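    # Note: "precision" below is the running fraction of predictions that
    # match the label among the first i+1 rows, and "recall" is the number of
    # matches so far divided by the total number of rows -- cumulative
    # accuracy-style curves rather than per-class precision/recall.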
for i in range(len(label)):
if conclusion[i] == label[i]:
hits+=1
precision_list.append(1.0*hits/(i+1))
recall_list.append(1.0*hits/(len(label)))
drawPrecisionRecall(precision_list,recall_list,settings.Output.report)
print("Done")
if __name__ == "__main__":
main()
| bsd-3-clause |
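# main.py above only applies an already-trained model loaded via joblib.  A
# hypothetical companion training step is sketched here; the file names, CSV
# layout and default LinearSVC settings are assumptions for illustration and
# are not part of the original module.
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.externals import joblib  # same import style as the script above

X_train = np.genfromtxt("train_X.csv", delimiter=',', skip_header=1)
y_train = np.genfromtxt("train_Y.csv", delimiter=',')
svc = LinearSVC()
svc.fit(X_train, y_train)
joblib.dump(svc, "model.pkl")  # later loaded with joblib.load(...) as above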
wwf5067/statsmodels | statsmodels/tsa/statespace/sarimax.py | 1 | 81942 | """
SARIMAX Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
import pandas as pd
from .kalman_filter import KalmanFilter, FilterResults
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.wrapper as wrap
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
        `p` and `q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
        `D` must be an integer indicating the integration order of the process,
        while `P` and `Q` may either be integers indicating the seasonal AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
        Used when explanatory variables, `exog`, are provided,
to select whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
        Whether or not to estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
        which discards the first :math:`s D + d` initial rows but results in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
        Array containing seasonal autoregressive lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
        Order of integration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
k_seasons : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
        Order of seasonal integration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
    is not able to accommodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
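
    Examples
    --------
    A minimal usage sketch; the data below is synthetic and purely
    illustrative (it is not taken from the references above)::

        import numpy as np
        from statsmodels.tsa.statespace.sarimax import SARIMAX

        y = np.random.RandomState(0).standard_normal(100).cumsum()
        mod = SARIMAX(y, order=(1, 1, 1))
        res = mod.fit(disp=False)
        print(res.summary())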
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.k_seasons = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], int):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], int):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], int):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
self.polynomial_seasonal_ar[(i + 1) * self.k_seasons] = (
seasonal_order[0][i]
)
if isinstance(seasonal_order[2], int):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
self.polynomial_seasonal_ma[(i + 1) * self.k_seasons] = (
seasonal_order[2][i]
)
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
        # Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar and
        # q = k_ma_params = k_ma, although this may not be true for models
        # with arbitrary lag polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
self._k_order = 0
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
        # State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.k_seasons * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
        # Diffuse initialization can be more sensitive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
            self.k_seasonal_ar_params + self.k_seasonal_ma_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if (self.simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.k_seasons * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Handle kwargs specified initialization
if self.ssm.initialization is not None:
self._manual_initialization = True
# Initialize the fixed components of the statespace model
self.ssm.design = self.initial_design
self.ssm.state_intercept = self.initial_state_intercept
self.ssm.transition = self.initial_transition
self.ssm.selection = self.initial_selection
# If we are estimating a simple ARMA model, then we can use a faster
# initialization method (unless initialization was already specified).
if k_diffuse_states == 0 and not self._manual_initialization:
self.initialize_stationary()
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
        # TODO: I think the kwargs are not attached, need to recover from ???
def _get_init_kwds(self):
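        """
        Keyword arguments needed to recreate the model; any entries that are
        None are filled in from the corresponding attribute of the underlying
        state space representation (`self.ssm`), if it has one.
        """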
kwds = super(SARIMAX, self)._get_init_kwds()
for key, value in kwds.items():
if value is None and hasattr(self.ssm, key):
kwds[key] = getattr(self.ssm, key)
return kwds
def prepare_data(self):
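        """
        Prepare `endog` and `exog` for the state space model: apply simple
        differencing if it was requested (resetting `nobs` accordingly) and
        cache the deterministic trend data used to build the intercept.
        """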
endog, exog = super(SARIMAX, self).prepare_data()
# Perform simple differencing if requested
if (self.simple_differencing and
(self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):
# Perform simple differencing
endog = diff(endog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
if exog is not None:
exog = diff(exog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
# Reset the nobs
self.nobs = endog.shape[0]
# Cache the arrays for calculating the intercept from the trend
# components
time_trend = np.arange(1, self.nobs + 1)
self._trend_data = np.zeros((self.nobs, self.k_trend))
i = 0
for k in self.polynomial_trend.nonzero()[0]:
if k == 0:
self._trend_data[:, i] = np.ones(self.nobs,)
else:
self._trend_data[:, i] = time_trend**k
i += 1
return endog, exog
def initialize(self):
"""
Initialize the SARIMAX model.
Notes
-----
These initialization steps must occur following the parent class
__init__ function calls.
"""
super(SARIMAX, self).initialize()
# Internal flag for whether the default mixed approximate diffuse /
# stationary initialization has been overridden with a user-supplied
# initialization
self._manual_initialization = False
# Cache the indexes of included polynomial orders (for update below)
# (but we do not want the index of the constant term, so exclude the
# first index)
self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
self._polynomial_seasonal_ar_idx = np.nonzero(
self.polynomial_seasonal_ar
)[0][1:]
self._polynomial_seasonal_ma_idx = np.nonzero(
self.polynomial_seasonal_ma
)[0][1:]
# Save the indices corresponding to the reduced form lag polynomial
# parameters in the transition and selection matrices so that they
# don't have to be recalculated for each update()
start_row = self._k_states_diff
end_row = start_row + self.k_ar + self.k_seasonal_ar
col = self._k_states_diff
if not self.hamilton_representation:
self.transition_ar_params_idx = (
np.s_['transition', start_row:end_row, col]
)
else:
self.transition_ar_params_idx = (
np.s_['transition', col, start_row:end_row]
)
start_row += 1
end_row = start_row + self.k_ma + self.k_seasonal_ma
col = 0
if not self.hamilton_representation:
self.selection_ma_params_idx = (
np.s_['selection', start_row:end_row, col]
)
else:
self.design_ma_params_idx = (
np.s_['design', col, start_row:end_row]
)
# Cache indices for exog variances in the state covariance matrix
if self.state_regression and self.time_varying_regression:
idx = np.diag_indices(self.k_posdef)
self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
idx[1][-self.k_exog:])
def initialize_known(self, initial_state, initial_state_cov):
self._manual_initialization = True
self.ssm.initialize_known(initial_state, initial_state_cov)
initialize_known.__doc__ = KalmanFilter.initialize_known.__doc__
def initialize_approximate_diffuse(self, variance=None):
self._manual_initialization = True
self.ssm.initialize_approximate_diffuse(variance)
initialize_approximate_diffuse.__doc__ = (
KalmanFilter.initialize_approximate_diffuse.__doc__
)
def initialize_stationary(self):
self._manual_initialization = True
self.ssm.initialize_stationary()
initialize_stationary.__doc__ = (
KalmanFilter.initialize_stationary.__doc__
)
def initialize_state(self, variance=None):
"""
Initialize state and state covariance arrays in preparation for the
Kalman filter.
Parameters
----------
variance : float, optional
The variance for approximating diffuse initial conditions. Default
can be found in the Representation class documentation.
Notes
-----
Initializes the ARMA component of the state space to the typical
stationary values and the other components as approximate diffuse.
        Can be overridden by calling one of the other initialization methods
before fitting the model.
"""
# Check if a manual initialization has already been specified
if self._manual_initialization:
return
# If we're not enforcing stationarity, then we can't initialize a
# stationary component
if not self.enforce_stationarity:
self.initialize_approximate_diffuse(variance)
return
# Otherwise, create the initial state and state covariance matrix
# as from a combination of diffuse and stationary components
# Create initialized non-stationary components
if variance is None:
variance = self.ssm.initial_variance
dtype = self.ssm.transition.dtype
initial_state = np.zeros(self.k_states, dtype=dtype)
initial_state_cov = np.eye(self.k_states, dtype=dtype) * variance
# Get the offsets (from the bottom or bottom right of the vector /
# matrix) for the stationary component.
if self.state_regression:
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
start = -self._k_order
end = None
# Add in the initialized stationary components
if self._k_order > 0:
selection_stationary = self.ssm.selection[start:end, :, 0]
selected_state_cov_stationary = np.dot(
np.dot(selection_stationary, self.ssm.state_cov[:, :, 0]),
selection_stationary.T
)
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary
)
initial_state_cov[start:end, start:end] = (
initial_state_cov_stationary
)
self.ssm.initialize_known(initial_state, initial_state_cov)
@property
def initial_design(self):
"""Initial design matrix"""
# Basic design matrix
design = np.r_[
[1] * self._k_diff,
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff,
[1] * self.state_error, [0] * (self._k_order - 1)
]
# If we have exogenous regressors included as part of the state vector
# then the exogenous data is incorporated as a time-varying component
# of the design matrix
if self.state_regression:
if self._k_order > 0:
design = np.c_[
np.reshape(
np.repeat(design, self.nobs),
(design.shape[0], self.nobs)
).T,
self.exog
].T[None, :, :]
else:
design = self.exog.T[None, :, :]
return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
@property
def initial_transition(self):
"""Initial transition matrix"""
transition = np.zeros((self.k_states, self.k_states))
# Exogenous regressors component
if self.state_regression:
start = -self.k_exog
# T_\beta
transition[start:, start:] = np.eye(self.k_exog)
# Autoregressive component
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
# Autoregressive component
start = -self._k_order
end = None
# T_c
transition[start:end, start:end] = companion_matrix(self._k_order)
if self.hamilton_representation:
transition[start:end, start:end] = np.transpose(
companion_matrix(self._k_order)
)
# Seasonal differencing component
# T^*
if self._k_seasonal_diff > 0:
seasonal_companion = companion_matrix(self.k_seasons).T
seasonal_companion[0, -1] = 1
for d in range(self._k_seasonal_diff):
start = self._k_diff + d * self.k_seasons
end = self._k_diff + (d + 1) * self.k_seasons
# T_c^*
transition[start:end, start:end] = seasonal_companion
# i
for i in range(d + 1, self._k_seasonal_diff):
transition[start, end + self.k_seasons - 1] = 1
# \iota
transition[start, self._k_states_diff] = 1
# Differencing component
if self._k_diff > 0:
idx = np.triu_indices(self._k_diff)
# T^**
transition[idx] = 1
# [0 1]
if self.k_seasons > 0:
start = self._k_diff
end = self._k_states_diff
transition[:self._k_diff, start:end] = (
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff
)
# [1 0]
column = self._k_states_diff
transition[:self._k_diff, column] = 1
return transition
@property
def initial_selection(self):
"""Initial selection matrix"""
if not (self.state_regression and self.time_varying_regression):
if self.k_posdef > 0:
selection = np.r_[
[0] * (self._k_states_diff),
[1] * (self._k_order > 0), [0] * (self._k_order - 1),
[0] * ((1 - self.mle_regression) * self.k_exog)
][:, None]
else:
selection = np.zeros((self.k_states, 0))
else:
selection = np.zeros((self.k_states, self.k_posdef))
# Typical state variance
if self._k_order > 0:
selection[0, 0] = 1
# Time-varying regression coefficient variances
for i in range(self.k_exog, 0, -1):
selection[-i, -i] = 1
return selection
def filter(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
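        """
        Kalman filtering at the given parameters (transforming them first if
        needed).  Returns a SARIMAXResultsWrapper unless `return_ssm` is True,
        in which case the underlying state space output is returned directly.
        """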
params = np.array(params)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(SARIMAX, self).filter(params, transformed, cov_type,
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = SARIMAXResultsWrapper(
SARIMAXResults(self, params, result, **result_kwargs)
)
return result
@staticmethod
def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
polynomial_ma, k_trend=0, trend_data=None):
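        """
        Conditional sum of squares starting values for the trend, AR and MA
        parameters and the innovation variance.  When MA terms are present,
        residuals from a long AR regression (2 * k_ma lags) act as proxies for
        the innovations, and the ARMA parameters are then obtained from a
        single least squares fit (similar in spirit to Hannan-Rissanen).
        """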
k = 2 * k_ma
r = max(k + k_ma, k_ar)
k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1
residuals = None
if k_ar + k_ma + k_trend > 0:
# If we have MA terms, get residuals from an AR(k) model to use
# as data for conditional sum of squares estimates of the MA
# parameters
if k_ma > 0:
Y = endog[k:]
X = lagmat(endog, k, trim='both')
params_ar = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params_ar)
# Run an ARMA(p,q) model using the just computed residuals as data
Y = endog[r:]
X = np.empty((Y.shape[0], 0))
if k_trend > 0:
if trend_data is None:
raise ValueError('Trend data must be provided if'
' `k_trend` > 0.')
X = np.c_[X, trend_data[:(-r if r > 0 else None), :]]
if k_ar > 0:
cols = polynomial_ar.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]
if k_ma > 0:
cols = polynomial_ma.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]
# Get the array of [ar_params, ma_params]
params = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params)
# Default output
params_trend = []
params_ar = []
params_ma = []
params_variance = []
# Get the params
offset = 0
if k_trend > 0:
params_trend = params[offset:k_trend + offset]
offset += k_trend
if k_ar > 0:
params_ar = params[offset:k_params_ar + offset]
offset += k_params_ar
if k_ma > 0:
params_ma = params[offset:k_params_ma + offset]
offset += k_params_ma
if residuals is not None:
params_variance = (residuals[k_params_ma:]**2).mean()
return (params_trend, params_ar, params_ma,
params_variance)
@property
def start_params(self):
"""
Starting parameters for maximum likelihood estimation
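
        Notes
        -----
        Regression coefficients are started at OLS values, the ARMA and
        seasonal ARMA terms at conditional sum of squares estimates computed
        on the (possibly differenced) data, and the innovation variance at the
        mean squared residual from that fit.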
"""
# Perform differencing if necessary (i.e. if simple differencing is
# false so that the state-space model will use the entire dataset)
trend_data = self._trend_data
if not self.simple_differencing and (
self._k_diff > 0 or self._k_seasonal_diff > 0):
endog = diff(self.endog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
if self.exog is not None:
exog = diff(self.exog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
else:
exog = None
trend_data = trend_data[:endog.shape[0], :]
else:
endog = self.endog.copy()
exog = self.exog.copy() if self.exog is not None else None
endog = endog.squeeze()
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            # Build the mask once, before filtering endog, so that exog and
            # trend_data are subset using the same rows.
            mask = ~np.isnan(endog)
            endog = endog[mask]
            if exog is not None:
                exog = exog[mask]
            if trend_data is not None:
                trend_data = trend_data[mask]
# Regression effects via OLS
params_exog = []
if self.k_exog > 0:
params_exog = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, params_exog)
if self.state_regression:
params_exog = []
# Non-seasonal ARMA component and trend
(params_trend, params_ar, params_ma,
params_variance) = self._conditional_sum_squares(
endog, self.k_ar, self.polynomial_ar, self.k_ma,
self.polynomial_ma, self.k_trend, trend_data
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_ar = (
self.k_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_ar])
)
if invalid_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_ma = (
self.k_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_ma])
)
if invalid_ma:
raise ValueError('non-invertible starting MA parameters found'
' with `enforce_invertibility` set to True.')
# Seasonal Parameters
_, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (
self._conditional_sum_squares(
endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,
self.k_seasonal_ma, self.polynomial_seasonal_ma
)
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_seasonal_ar = (
self.k_seasonal_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_seasonal_ar])
)
if invalid_seasonal_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_seasonal_ma = (
self.k_seasonal_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_seasonal_ma])
)
if invalid_seasonal_ma:
raise ValueError('non-invertible starting seasonal moving average'
' parameters found with `enforce_invertibility`'
' set to True.')
# Variances
params_exog_variance = []
if self.state_regression and self.time_varying_regression:
# TODO how to set the initial variance parameters?
params_exog_variance = [1] * self.k_exog
if self.state_error and params_variance == []:
if not params_seasonal_variance == []:
params_variance = params_seasonal_variance
elif self.k_exog > 0:
params_variance = np.dot(endog, endog)
else:
params_variance = 1
params_measurement_variance = 1 if self.measurement_error else []
# Combine all parameters
return np.r_[
params_trend,
params_exog,
params_ar,
params_ma,
params_seasonal_ar,
params_seasonal_ma,
params_exog_variance,
params_measurement_variance,
params_variance
]
@property
def endog_names(self, latex=False):
"""Names of endogenous variables"""
diff = ''
if self.k_diff > 0:
if self.k_diff == 1:
diff = '\Delta' if latex else 'D'
else:
diff = ('\Delta^%d' if latex else 'D%d') % self.k_diff
seasonal_diff = ''
if self.k_seasonal_diff > 0:
if self.k_seasonal_diff == 1:
seasonal_diff = (('\Delta_%d' if latex else 'DS%d') %
(self.k_seasons))
else:
seasonal_diff = (('\Delta_%d^%d' if latex else 'D%dS%d') %
(self.k_seasonal_diff, self.k_seasons))
endog_diff = self.simple_differencing
if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:
return (('%s%s %s' if latex else '%s.%s.%s') %
(diff, seasonal_diff, self.data.ynames))
elif endog_diff and self.k_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(diff, self.data.ynames))
elif endog_diff and self.k_seasonal_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(seasonal_diff, self.data.ynames))
else:
return self.data.ynames
params_complete = [
'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',
'exog_variance', 'measurement_variance', 'variance'
]
@property
def param_terms(self):
"""
List of parameters actually included in the model, in sorted order.
TODO Make this an OrderedDict with slice or indices as the values.
"""
model_orders = self.model_orders
# Get basic list from model orders
params = [
order for order in self.params_complete
if model_orders[order] > 0
]
# k_exog may be positive without associated parameters if it is in the
# state vector
if 'exog' in params and not self.mle_regression:
params.remove('exog')
return params
@property
def param_names(self):
"""
List of human readable parameter names (for parameters actually
included in the model).
"""
params_sort_order = self.param_terms
model_names = self.model_names
return [
name for param in params_sort_order for name in model_names[param]
]
@property
def model_orders(self):
"""
The orders of each of the polynomials in the model.
"""
return {
'trend': self.k_trend,
'exog': self.k_exog,
'ar': self.k_ar,
'ma': self.k_ma,
'seasonal_ar': self.k_seasonal_ar,
'seasonal_ma': self.k_seasonal_ma,
'reduced_ar': self.k_ar + self.k_seasonal_ar,
'reduced_ma': self.k_ma + self.k_seasonal_ma,
'exog_variance': self.k_exog if (
self.state_regression and self.time_varying_regression) else 0,
'measurement_variance': int(self.measurement_error),
'variance': int(self.state_error),
}
@property
def model_names(self):
"""
The plain text names of all possible model parameters.
"""
return self._get_model_names(latex=False)
@property
def model_latex_names(self):
"""
The latex names of all possible model parameters.
"""
return self._get_model_names(latex=True)
def _get_model_names(self, latex=False):
names = {
'trend': None,
'exog': None,
'ar': None,
'ma': None,
'seasonal_ar': None,
'seasonal_ma': None,
'reduced_ar': None,
'reduced_ma': None,
'exog_variance': None,
'measurement_variance': None,
'variance': None,
}
# Trend
if self.k_trend > 0:
trend_template = 't_%d' if latex else 'trend.%d'
names['trend'] = []
for i in self.polynomial_trend.nonzero()[0]:
if i == 0:
names['trend'].append('intercept')
elif i == 1:
names['trend'].append('drift')
else:
names['trend'].append(trend_template % i)
# Exogenous coefficients
if self.k_exog > 0:
names['exog'] = self.exog_names
# Autoregressive
if self.k_ar > 0:
ar_template = '$\\phi_%d$' if latex else 'ar.L%d'
names['ar'] = []
for i in self.polynomial_ar.nonzero()[0][1:]:
names['ar'].append(ar_template % i)
# Moving Average
if self.k_ma > 0:
ma_template = '$\\theta_%d$' if latex else 'ma.L%d'
names['ma'] = []
for i in self.polynomial_ma.nonzero()[0][1:]:
names['ma'].append(ma_template % i)
# Seasonal Autoregressive
if self.k_seasonal_ar > 0:
seasonal_ar_template = (
'$\\tilde \\phi_%d$' if latex else 'ar.S.L%d'
)
names['seasonal_ar'] = []
for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:
names['seasonal_ar'].append(seasonal_ar_template % i)
# Seasonal Moving Average
if self.k_seasonal_ma > 0:
seasonal_ma_template = (
'$\\tilde \\theta_%d$' if latex else 'ma.S.L%d'
)
names['seasonal_ma'] = []
for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:
names['seasonal_ma'].append(seasonal_ma_template % i)
# Reduced Form Autoregressive
if self.k_ar > 0 or self.k_seasonal_ar > 0:
            reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
ar_template = '$\\Phi_%d$' if latex else 'ar.R.L%d'
names['reduced_ar'] = []
for i in reduced_polynomial_ar.nonzero()[0][1:]:
names['reduced_ar'].append(ar_template % i)
# Reduced Form Moving Average
if self.k_ma > 0 or self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
ma_template = '$\\Theta_%d$' if latex else 'ma.R.L%d'
names['reduced_ma'] = []
for i in reduced_polynomial_ma.nonzero()[0][1:]:
names['reduced_ma'].append(ma_template % i)
# Exogenous variances
if self.state_regression and self.time_varying_regression:
exog_var_template = '$\\sigma_\\text{%s}^2$' if latex else 'var.%s'
names['exog_variance'] = [
exog_var_template % exog_name for exog_name in self.exog_names
]
# Measurement error variance
if self.measurement_error:
meas_var_tpl = (
'$\\sigma_\\eta^2$' if latex else 'var.measurement_error'
)
names['measurement_variance'] = [meas_var_tpl]
# State variance
if self.state_error:
var_tpl = '$\\sigma_\\zeta^2$' if latex else 'sigma2'
names['variance'] = [var_tpl]
return names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Used primarily to enforce stationarity of the autoregressive lag
polynomial, invertibility of the moving average lag polynomial, and
positive variance parameters.
Parameters
----------
unconstrained : array_like
Unconstrained parameters used by the optimizer.
Returns
-------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
unconstrained = np.array(unconstrained)
constrained = np.zeros(unconstrained.shape, unconstrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
constrained[start:end] = unconstrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ma_params
# Transform the standard deviation parameters to be positive
if self.state_regression and self.time_varying_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]**2
start += self.k_exog
if self.measurement_error:
constrained[start] = unconstrained[start]**2
start += 1
end += 1
if self.state_error:
constrained[start] = unconstrained[start]**2
# start += 1
# end += 1
return constrained
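    # Illustrative sketch (hypothetical values, not from the source): for an
    # ARMA(1, 1) model with a state variance, transform_params maps any real
    # optimizer vector into the valid region, e.g.
    #
    #     # mod = SARIMAX(endog, order=(1, 0, 1))
    #     # mod.transform_params([5.0, -3.0, 2.0])
    #     # -> AR/MA terms squeezed into the stationary/invertible interval,
    #     #    variance returned as 2.0**2 = 4.0 (always positive)
    #
    # untransform_params below is its inverse on the interior of that region.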
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Used primarily to reverse enforcement of stationarity of the
autoregressive lag polynomial and invertibility of the moving average
lag polynomial.
Parameters
----------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Returns
-------
        unconstrained : array_like
Unconstrained parameters used by the optimizer.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
constrained = np.array(constrained)
unconstrained = np.zeros(constrained.shape, constrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
unconstrained[start:end] = constrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ma_params
# Untransform the standard deviation
if self.state_regression and self.time_varying_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]**0.5
start += self.k_exog
if self.measurement_error:
unconstrained[start] = constrained[start]**0.5
start += 1
end += 1
if self.state_error:
unconstrained[start] = constrained[start]**0.5
# start += 1
# end += 1
return unconstrained
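    # Consistency note (an expectation stated for illustration, not asserted
    # anywhere in this file): for a constrained vector strictly inside the
    # stationary/invertible region,
    #
    #     # np.allclose(self.transform_params(self.untransform_params(params)),
    #     #             params)
    #
    # should hold up to floating point error; on the boundary the mapping is
    # not one-to-one.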
def update(self, params, transformed=True):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
params = super(SARIMAX, self).update(params, transformed)
params_trend = None
params_exog = None
params_ar = None
params_ma = None
params_seasonal_ar = None
params_seasonal_ma = None
params_exog_variance = None
params_measurement_variance = None
params_variance = None
# Extract the parameters
start = end = 0
end += self.k_trend
params_trend = params[start:end]
start += self.k_trend
if self.mle_regression:
end += self.k_exog
params_exog = params[start:end]
start += self.k_exog
end += self.k_ar_params
params_ar = params[start:end]
start += self.k_ar_params
end += self.k_ma_params
params_ma = params[start:end]
start += self.k_ma_params
end += self.k_seasonal_ar_params
params_seasonal_ar = params[start:end]
start += self.k_seasonal_ar_params
end += self.k_seasonal_ma_params
params_seasonal_ma = params[start:end]
start += self.k_seasonal_ma_params
if self.state_regression and self.time_varying_regression:
end += self.k_exog
params_exog_variance = params[start:end]
start += self.k_exog
if self.measurement_error:
params_measurement_variance = params[start]
start += 1
end += 1
if self.state_error:
params_variance = params[start]
# start += 1
# end += 1
# Update lag polynomials
if self.k_ar > 0:
if self.polynomial_ar.dtype == params.dtype:
self.polynomial_ar[self._polynomial_ar_idx] = -params_ar
else:
polynomial_ar = self.polynomial_ar.real.astype(params.dtype)
polynomial_ar[self._polynomial_ar_idx] = -params_ar
self.polynomial_ar = polynomial_ar
if self.k_ma > 0:
if self.polynomial_ma.dtype == params.dtype:
self.polynomial_ma[self._polynomial_ma_idx] = params_ma
else:
polynomial_ma = self.polynomial_ma.real.astype(params.dtype)
polynomial_ma[self._polynomial_ma_idx] = params_ma
self.polynomial_ma = polynomial_ma
if self.k_seasonal_ar > 0:
idx = self._polynomial_seasonal_ar_idx
if self.polynomial_seasonal_ar.dtype == params.dtype:
self.polynomial_seasonal_ar[idx] = -params_seasonal_ar
else:
polynomial_seasonal_ar = (
self.polynomial_seasonal_ar.real.astype(params.dtype)
)
polynomial_seasonal_ar[idx] = -params_seasonal_ar
self.polynomial_seasonal_ar = polynomial_seasonal_ar
if self.k_seasonal_ma > 0:
idx = self._polynomial_seasonal_ma_idx
if self.polynomial_seasonal_ma.dtype == params.dtype:
self.polynomial_seasonal_ma[idx] = params_seasonal_ma
else:
polynomial_seasonal_ma = (
self.polynomial_seasonal_ma.real.astype(params.dtype)
)
polynomial_seasonal_ma[idx] = params_seasonal_ma
self.polynomial_seasonal_ma = polynomial_seasonal_ma
# Get the reduced form lag polynomial terms by multiplying the regular
# and seasonal lag polynomials
        # Note: the numpy np.polymul examples assume coefficients are ordered
        # from highest degree to lowest, whereas ours are ordered from lowest
        # to highest; the convolution result is the same either way.
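        # Tiny worked example (added for illustration; the values are arbitrary):
        # with phi(L) = 1 - 0.5 L and a seasonal factor 1 - 0.3 L**4, both stored
        # lowest-degree-first,
        #     np.polymul([1, -0.5], [1, 0, 0, 0, -0.3])
        #     -> array([ 1.  , -0.5 ,  0.  ,  0.  , -0.3 ,  0.15])
        # i.e. the reduced polynomial 1 - 0.5 L - 0.3 L**4 + 0.15 L**5.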
if self.k_seasonal_ar > 0:
reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
else:
reduced_polynomial_ar = -self.polynomial_ar
if self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
else:
reduced_polynomial_ma = self.polynomial_ma
# Observation intercept
# Exogenous data with MLE estimation of parameters enters through a
# time-varying observation intercept (is equivalent to simply
# subtracting it out of the endogenous variable first)
if self.mle_regression:
self.ssm['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]
# State intercept (Harvey) or additional observation intercept
# (Hamilton)
# SARIMA trend enters through the a time-varying state intercept,
# associated with the first row of the stationary component of the
# state vector (i.e. the first element of the state vector following
# any differencing elements)
if self.k_trend > 0:
data = np.dot(self._trend_data, params_trend).astype(params.dtype)
if not self.hamilton_representation:
self.ssm['state_intercept', self._k_states_diff, :] = data
else:
# The way the trend enters in the Hamilton representation means
# that the parameter is not an ``intercept'' but instead the
# mean of the process. The trend values in `data` are meant for
# an intercept, and so must be transformed to represent the
# mean instead
                data /= np.sum(-reduced_polynomial_ar)
# If we already set the observation intercept for MLE
# regression, just add to it
if self.mle_regression:
self.ssm.obs_intercept += data[None, :]
# Otherwise set it directly
else:
self.ssm.obs_intercept = data[None, :]
# Observation covariance matrix
if self.measurement_error:
self.ssm['obs_cov', 0, 0] = params_measurement_variance
# Transition matrix
if self.k_ar > 0 or self.k_seasonal_ar > 0:
self.ssm[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]
elif not self.ssm.transition.dtype == params.dtype:
# This is required if the transition matrix is not really in use
            # (e.g. for an MA(q) process) so that its dtype never changes as
# the parameters' dtype changes. This changes the dtype manually.
self.ssm.transition = self.ssm.transition.real.astype(params.dtype)
# Selection matrix (Harvey) or Design matrix (Hamilton)
if self.k_ma > 0 or self.k_seasonal_ma > 0:
if not self.hamilton_representation:
self.ssm[self.selection_ma_params_idx] = (
reduced_polynomial_ma[1:]
)
else:
self.ssm[self.design_ma_params_idx] = reduced_polynomial_ma[1:]
# State covariance matrix
if self.k_posdef > 0:
self.ssm['state_cov', 0, 0] = params_variance
if self.state_regression and self.time_varying_regression:
self.ssm[self._exog_variance_idx] = params_exog_variance
# Initialize
if not self._manual_initialization:
self.initialize_state()
return params
class SARIMAXResults(MLEResults):
"""
Class to hold results from fitting an SARIMAX model.
Parameters
----------
model : SARIMAX instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the SARIMAX model instance.
polynomial_ar : array
Array containing autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients, ordered from lowest
degree to highest. Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
model_orders : list of int
The orders of each of the polynomials in the model.
param_terms : list of str
List of parameters actually included in the model, in sorted order.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg', **kwargs):
super(SARIMAXResults, self).__init__(model, params, filter_results,
cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
self.specification = Bunch(**{
# Set additional model parameters
'k_seasons': self.model.k_seasons,
'measurement_error': self.model.measurement_error,
'time_varying_regression': self.model.time_varying_regression,
'mle_regression': self.model.mle_regression,
'simple_differencing': self.model.simple_differencing,
'enforce_stationarity': self.model.enforce_stationarity,
'enforce_invertibility': self.model.enforce_invertibility,
'hamilton_representation': self.model.hamilton_representation,
'order': self.model.order,
'seasonal_order': self.model.seasonal_order,
# Model order
'k_diff': self.model.k_diff,
'k_seasonal_diff': self.model.k_seasonal_diff,
'k_ar': self.model.k_ar,
'k_ma': self.model.k_ma,
'k_seasonal_ar': self.model.k_seasonal_ar,
'k_seasonal_ma': self.model.k_seasonal_ma,
# Param Numbers
'k_ar_params': self.model.k_ar_params,
'k_ma_params': self.model.k_ma_params,
# Trend / Regression
'trend': self.model.trend,
'k_trend': self.model.k_trend,
'k_exog': self.model.k_exog,
'mle_regression': self.model.mle_regression,
'state_regression': self.model.state_regression,
})
# Polynomials
self.polynomial_trend = self.model.polynomial_trend
self.polynomial_ar = self.model.polynomial_ar
self.polynomial_ma = self.model.polynomial_ma
self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar
self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma
self.polynomial_reduced_ar = np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
self.polynomial_reduced_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
# Distinguish parameters
self.model_orders = self.model.model_orders
self.param_terms = self.model.param_terms
start = end = 0
for name in self.param_terms:
end += self.model_orders[name]
setattr(self, '_params_%s' % name, self.params[start:end])
start += self.model_orders[name]
@cache_readonly
def arroots(self):
"""
(array) Roots of the reduced form autoregressive lag polynomial
"""
return np.roots(self.polynomial_reduced_ar)**-1
@cache_readonly
def maroots(self):
"""
(array) Roots of the reduced form moving average lag polynomial
"""
return np.roots(self.polynomial_reduced_ma)**-1
@cache_readonly
def arfreq(self):
"""
(array) Frequency of the roots of the reduced form autoregressive
lag polynomial
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def mafreq(self):
"""
(array) Frequency of the roots of the reduced form moving average
lag polynomial
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
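    # Illustrative sketch (assumed coefficients, not from the source): for a
    # reduced AR polynomial 1 - 0.75 L + 0.5 L**2 stored lowest-degree-first,
    #
    #     # z = np.roots([1.0, -0.75, 0.5]) ** -1   # as in arroots above
    #     # freq = np.arctan2(z.imag, z.real) / (2 * np.pi)
    #
    # complex-conjugate roots give +/- the same frequency (cycles per period),
    # and all roots lying outside the unit circle indicates stationarity.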
@cache_readonly
def arparams(self):
"""
(array) Autoregressive parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ar
@cache_readonly
def maparams(self):
"""
(array) Moving average parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ma
def predict(self, start=None, end=None, exog=None, dynamic=False,
**kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, i.e.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : boolean, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
full_results : boolean, optional
If True, returns a FilterResults instance; if False returns a
tuple with forecasts, the forecast errors, and the forecast error
covariance matrices. Default is False.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
if start is None:
start = 0
# Handle end (e.g. date)
_start = self.model._get_predict_start(start)
_end, _out_of_sample = self.model._get_predict_end(end)
# Handle exogenous parameters
if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
# Create a new faux SARIMAX model for the extended dataset
nobs = self.model.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError('Out-of-sample forecasting in a model'
' with a regression component requires'
' additional exogenous values via the'
' `exog` argument.')
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
if not exog.shape == required_exog_shape:
raise ValueError('Provided exogenous values are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_exog_shape),
str(exog.shape)))
exog = np.c_[self.model.orig_exog.T, exog.T].T
# TODO replace with init_kwds or specification or similar
model = SARIMAX(
endog,
exog=exog,
order=self.model.order,
seasonal_order=self.model.seasonal_order,
trend=self.model.trend,
measurement_error=self.model.measurement_error,
time_varying_regression=self.model.time_varying_regression,
mle_regression=self.model.mle_regression,
simple_differencing=self.model.simple_differencing,
enforce_stationarity=self.model.enforce_stationarity,
enforce_invertibility=self.model.enforce_invertibility,
hamilton_representation=self.model.hamilton_representation
)
model.update(self.params)
# Set the kwargs with the update time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == 'obs':
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
if len(mat.shape) == 2:
kwargs[name] = mat[:, -_out_of_sample:]
else:
kwargs[name] = mat[:, :, -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn('Exogenous array provided to predict, but additional data not'
' required. `exog` argument ignored.')
return super(SARIMAXResults, self).predict(
start=start, end=end, exog=exog, dynamic=dynamic, **kwargs
)
def forecast(self, steps=1, exog=None, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, optional
The number of out of sample forecasts from the end of the
sample. Default is 1.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables for
each step forecasted.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
return super(SARIMAXResults, self).forecast(steps, exog=exog, **kwargs)
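    # Hypothetical usage sketch (variable names are assumptions, not from the
    # source):
    #
    #     # res = SARIMAX(y, exog=x, order=(1, 1, 1)).fit()
    #     # res.forecast(steps=3, exog=x_future)   # x_future: 3 rows required
    #
    # i.e. models with a regression component need out-of-sample exog values
    # for every forecasted step, mirroring the shape check in `predict` above.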
def summary(self, alpha=.05, start=None):
# Create the model name
# See if we have an ARIMA component
order = ''
if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
if self.model.k_ar == self.model.k_ar_params:
order_ar = self.model.k_ar
else:
order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
if self.model.k_ma == self.model.k_ma_params:
order_ma = self.model.k_ma
else:
order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_diff = 0 if self.model.simple_differencing else self.model.k_diff
order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)
# See if we have an SARIMA component
seasonal_order = ''
has_seasonal = (
self.model.k_seasonal_ar +
self.model.k_seasonal_diff +
self.model.k_seasonal_ma
) > 0
if has_seasonal:
            if self.model.k_seasonal_ar == self.model.k_seasonal_ar_params:
order_seasonal_ar = (
int(self.model.k_seasonal_ar / self.model.k_seasons)
)
else:
order_seasonal_ar = (
tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
)
            if self.model.k_seasonal_ma == self.model.k_seasonal_ma_params:
order_seasonal_ma = (
int(self.model.k_seasonal_ma / self.model.k_seasons)
)
else:
order_seasonal_ma = (
tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
)
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_seasonal_diff = self.model.k_seasonal_diff
if self.model.simple_differencing:
k_seasonal_diff = 0
seasonal_order = ('(%s, %d, %s, %d)' %
(str(order_seasonal_ar), k_seasonal_diff,
str(order_seasonal_ma), self.model.k_seasons))
if not order == '':
order += 'x'
model_name = (
'%s%s%s' % (self.model.__class__.__name__, order, seasonal_order)
)
return super(SARIMAXResults, self).summary(
alpha=alpha, start=start, model_name=model_name
)
summary.__doc__ = MLEResults.summary.__doc__
class SARIMAXResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(SARIMAXResultsWrapper, SARIMAXResults)
| bsd-3-clause |
SMTorg/smt | smt/applications/tests/test_mixed_integer.py | 2 | 8391 | import unittest
import numpy as np
import matplotlib
matplotlib.use("Agg")
from smt.applications.mixed_integer import (
MixedIntegerContext,
MixedIntegerSamplingMethod,
FLOAT,
ENUM,
INT,
check_xspec_consistency,
unfold_xlimits_with_continuous_limits,
fold_with_enum_index,
unfold_with_enum_mask,
compute_unfolded_dimension,
cast_to_enum_value,
cast_to_mixed_integer,
)
from smt.problems import Sphere
from smt.sampling_methods import LHS
from smt.surrogate_models import KRG
class TestMixedInteger(unittest.TestCase):
def test_check_xspec_consistency(self):
xtypes = [FLOAT, (ENUM, 3), INT]
xlimits = [[-10, 10], ["blue", "red", "green"]] # Bad dimension
with self.assertRaises(ValueError):
check_xspec_consistency(xtypes, xlimits)
xtypes = [FLOAT, (ENUM, 3), INT]
xlimits = [[-10, 10], ["blue", "red"], [-10, 10]] # Bad enum
with self.assertRaises(ValueError):
check_xspec_consistency(xtypes, xlimits)
def test_krg_mixed_3D(self):
xtypes = [FLOAT, (ENUM, 3), INT]
xlimits = [[-10, 10], ["blue", "red", "green"], [-10, 10]]
mixint = MixedIntegerContext(xtypes, xlimits)
sm = mixint.build_surrogate_model(KRG(print_prediction=False))
sampling = mixint.build_sampling_method(LHS, criterion="m")
fun = Sphere(ndim=3)
xt = sampling(20)
yt = fun(xt)
sm.set_training_values(xt, yt)
sm.train()
eq_check = True
for i in range(xt.shape[0]):
if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8:
eq_check = False
if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2):
eq_check = False
self.assertTrue(eq_check)
def test_compute_unfolded_dimension(self):
xtypes = [FLOAT, (ENUM, 2)]
self.assertEqual(3, compute_unfolded_dimension(xtypes))
def test_unfold_with_enum_mask(self):
xtypes = [FLOAT, (ENUM, 2)]
x = np.array([[1.5, 1], [1.5, 0], [1.5, 1]])
expected = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]]
self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist())
def test_unfold_with_enum_mask_with_enum_first(self):
xtypes = [(ENUM, 2), FLOAT]
x = np.array([[1, 1.5], [0, 1.5], [1, 1.5]])
expected = [[0, 1, 1.5], [1, 0, 1.5], [0, 1, 1.5]]
self.assertListEqual(expected, unfold_with_enum_mask(xtypes, x).tolist())
def test_fold_with_enum_index(self):
xtypes = [FLOAT, (ENUM, 2)]
x = np.array([[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]])
expected = [[1.5, 1], [1.5, 0], [1.5, 1]]
self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
def test_fold_with_enum_index_with_list(self):
xtypes = [FLOAT, (ENUM, 2)]
expected = [[1.5, 1]]
x = np.array([1.5, 0, 1])
self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
x = [1.5, 0, 1]
self.assertListEqual(expected, fold_with_enum_index(xtypes, x).tolist())
def test_cast_to_enum_value(self):
xlimits = [[0.0, 4.0], ["blue", "red"]]
x_col = 1
enum_indexes = [1, 1, 0, 1, 0]
expected = ["red", "red", "blue", "red", "blue"]
self.assertListEqual(expected, cast_to_enum_value(xlimits, x_col, enum_indexes))
def test_unfolded_xlimits_type(self):
xtypes = [FLOAT, (ENUM, 2), (ENUM, 2), INT]
xlimits = np.array([[-5, 5], ["2", "3"], ["4", "5"], [0, 2]])
sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese")
doe = sampling(10)
self.assertEqual((10, 4), doe.shape)
def test_cast_to_mixed_integer(self):
xtypes = [FLOAT, (ENUM, 2), (ENUM, 3), INT]
xlimits = np.array(
[[-5, 5], ["blue", "red"], ["short", "medium", "long"], [0, 2]],
dtype="object",
)
x = np.array([1.5, 0, 2, 1])
self.assertEqual(
[1.5, "blue", "long", 1], cast_to_mixed_integer(xtypes, xlimits, x)
)
def run_mixed_integer_lhs_example(self):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from smt.sampling_methods import LHS
from smt.applications.mixed_integer import (
FLOAT,
INT,
ENUM,
MixedIntegerSamplingMethod,
)
xtypes = [FLOAT, (ENUM, 2)]
xlimits = [[0.0, 4.0], ["blue", "red"]]
sampling = MixedIntegerSamplingMethod(xtypes, xlimits, LHS, criterion="ese")
num = 40
x = sampling(num)
cmap = colors.ListedColormap(xlimits[1])
plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap)
plt.show()
def run_mixed_integer_qp_example(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import QP
from smt.applications.mixed_integer import MixedIntegerSurrogateModel, INT
xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])
# xtypes = [FLOAT, INT, (ENUM, 3), (ENUM, 2)]
# FLOAT means x1 continuous
# INT means x2 integer
# (ENUM, 3) means x3, x4 & x5 are 3 levels of the same categorical variable
# (ENUM, 2) means x6 & x7 are 2 levels of the same categorical variable
sm = MixedIntegerSurrogateModel(xtypes=[INT], xlimits=[[0, 4]], surrogate=QP())
sm.set_training_values(xt, yt)
sm.train()
num = 100
x = np.linspace(0.0, 4.0, num)
y = sm.predict_values(x)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("x")
plt.ylabel("y")
plt.legend(["Training data", "Prediction"])
plt.show()
def run_mixed_integer_context_example(self):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
from smt.surrogate_models import KRG
from smt.sampling_methods import LHS, Random
from smt.applications.mixed_integer import MixedIntegerContext, FLOAT, INT, ENUM
xtypes = [INT, FLOAT, (ENUM, 4)]
xlimits = [[0, 5], [0.0, 4.0], ["blue", "red", "green", "yellow"]]
def ftest(x):
return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1)
# context to create consistent DOEs and surrogate
mixint = MixedIntegerContext(xtypes, xlimits)
# DOE for training
lhs = mixint.build_sampling_method(LHS, criterion="ese")
num = mixint.get_unfolded_dimension() * 5
print("DOE point nb = {}".format(num))
xt = lhs(num)
yt = ftest(xt)
# Surrogate
sm = mixint.build_surrogate_model(KRG())
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
rand = mixint.build_sampling_method(Random)
xv = rand(50)
yv = ftest(xv)
yp = sm.predict_values(xv)
plt.plot(yv, yv)
plt.plot(yv, yp, "o")
plt.xlabel("actual")
plt.ylabel("prediction")
plt.show()
def test_mixed_gower(self):
from smt.applications.mixed_integer import MixedIntegerSurrogateModel, ENUM
from smt.surrogate_models import KRG
import matplotlib.pyplot as plt
import numpy as np
xt = np.linspace(1.0, 5.0, 5)
x_train = np.array(["%.2f" % i for i in xt], dtype=object)
yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])
xlimits = [["0.0", "1.0", " 2.0", "3.0", "4.0"]]
# Surrogate
sm = MixedIntegerSurrogateModel(
use_gower_distance=True,
xtypes=[(ENUM, 5)],
xlimits=xlimits,
surrogate=KRG(theta0=[1e-2]),
)
sm.set_training_values(x_train, yt)
sm.train()
# DOE for validation
num = 101
x = np.linspace(0, 5, num)
x_pred = np.array(["%.2f" % i for i in x], dtype=object)
y = sm.predict_values(x_pred)
plt.plot(xt, yt, "o")
plt.plot(x, y)
plt.xlabel("actual")
plt.ylabel("prediction")
plt.show()
if __name__ == "__main__":
TestMixedInteger().run_mixed_integer_context_example()
| bsd-3-clause |
h2educ/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
            n_informative = n_features // 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
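# Minimal usage sketch (illustrative only; the sizes below are assumptions):
# both solvers recover a sparse code on a small synthetic signal, which is what
# the timing ratios above compare.
#
#     # y, X, _ = make_sparse_coded_signal(n_samples=1, n_components=50,
#     #                                    n_features=30, n_nonzero_coefs=5,
#     #                                    random_state=0)
#     # coef_omp = orthogonal_mp(X, y, n_nonzero_coefs=5)
#     # _, _, coefs_lars = lars_path(X, y, max_iter=5)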
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
    for i, (label, timings) in enumerate(sorted(results.items())):
        ax = fig.add_subplot(1, 2, i + 1)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([''] + list(map(str, samples_range)))
        ax.set_yticklabels([''] + list(map(str, features_range)))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
myuuuuun/various | ContinuousAlgorithm/HW8/hw8-2.py | 1 | 4603 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2015 @myuuuuun
Released under the MIT license.
"""
import math
import numpy as np
import pandas as pd
import functools
import matplotlib.pyplot as plt
EPSIRON = 1.0e-8
np.set_printoptions(precision=3)
np.set_printoptions(linewidth=400)
np.set_printoptions(threshold=np.nan)
pd.set_option('display.max_columns', 30)
pd.set_option('display.width', 400)
residual = [[-4.8571429, -8.952381, -3.3333333],
[2.7428571, 2.2380952, 6.9904762],
[-2.444898, -3.4530612, -2.2571429],
[1.3355102, 1.3748299, 2.8868027],
[-1.0997668, -1.4597862, -1.270068],
[0.65483382, 0.72760933, 1.2424606],
[-0.50051062, -0.6416221, -0.65484354],
[0.31542257, 0.36406567, 0.55181013],
[-0.23047317, -0.28906049, -0.32358026],
[0.1502636, 0.17708945, 0.25026068],
[-0.10692094, -0.13229851, -0.15634154],
[0.071128713, 0.084823672, 0.11501942],
[-0.049827426, -0.061148823, -0.074603774],
[0.033545129, 0.040277617, 0.053298436],
[-0.023283648, -0.028433042, -0.03534828],
[0.015786117, 0.019029871, 0.024821041],
[-0.0108977, -0.013268608, -0.016679961],
[0.0074194249, 0.0089648382, 0.011593732],
[-0.0051054624, -0.006205313, -0.0078520446],
[0.0034845039, 0.0042160724, 0.0054250086],
[-0.0023932169, -0.0029057543, -0.0036911447],
[0.0016357637, 0.0019807887, 0.0025411916],
[-0.0011222125, -0.0013617094, -0.0017337277],
[0.00076769266, 0.00093006159, 0.0011910965],
[-0.00052632559, -0.00063841869, -0.00081393451],
[0.00036023645, 0.00043655001, 0.00055849308],
[-0.00024687945, -0.00029939358, -0.00038200882],
[0.00016902409, 0.00020486464, 0.0002619293],
[-0.00011580987, -0.0001404261, -0.00017926015],
[7.9302405e-05, 9.6127424e-05, 0.00012285895],
[-5.4328042e-05, -6.5870903e-05, -8.4110589e-05],
[3.7205777e-05, 4.5102069e-05, 5.7631889e-05],
[-2.5486668e-05, -3.0900344e-05, -3.9463167e-05],
[1.7455259e-05, 2.1160564e-05, 2.7035762e-05],
[-1.1956616e-05, -1.4495965e-05, -1.8514758e-05],
[8.1891239e-06, 9.9276657e-06, 1.2683118e-05],
[-5.6092811e-06, -6.8004759e-06, -8.6863074e-06],
[3.8418973e-06, 4.6575834e-06, 5.9500459e-06],
[-2.6315298e-06, -3.1903359e-06, -4.0751824e-06],
[1.802405e-06, 2.1850951e-06, 2.7913781e-06],
[-1.2345556e-06, -1.4967058e-06, -1.9118587e-06],
[8.455865e-07, 1.0251273e-06, 1.309542e-06],
[-5.7918031e-07, -7.0216336e-07, -8.9693853e-07],
[3.9670083e-07, 4.8093228e-07, 6.1435812e-07],
[-2.7171735e-07, -3.2941314e-07, -4.2079298e-07],
[1.8610919e-07, 2.2562607e-07, 2.8822034e-07],
[-1.2747388e-07, -1.545412e-07, -1.9741204e-07],
[8.731168e-08, 1.0585081e-07, 1.3521602e-07],
[-5.9803309e-08, -7.2501667e-08, -9.2614375e-08],
[4.0961584e-08, 4.9659096e-08, 6.343544e-08],
[-2.8056229e-08, -3.4013548e-08, -4.3449315e-08],
[1.9216801e-08, 2.3297178e-08, 2.9760209e-08],
[-1.3162349e-08, -1.5957173e-08, -2.0383901e-08],
[9.0154071e-09, 1.0929684e-08, 1.3961753e-08],
[-6.1750072e-09, -7.4861752e-09, -9.562946e-09],
[4.2295056e-09, 5.127573e-09, 6.5500423e-09],
[-2.8969538e-09, -3.5120777e-09, -4.486381e-09],
[1.9842386e-09, 2.405562e-09, 3.0729019e-09],
[-1.3590835e-09, -1.6476651e-09, -2.1047484e-09],
[9.3089092e-10, 1.1285515e-09, 1.4416308e-09],
[-6.3760375e-10, -7.7299056e-10, -9.8742348e-10],
[4.3672088e-10, 5.2945204e-10, 6.763301e-10],
[-2.9912783e-10, -3.6264325e-10, -4.6324189e-10],
[2.04885e-10, 2.4839153e-10, 3.1729996e-10],
[-1.4033574e-10, -1.7013058e-10, -2.1733015e-10],
[9.6120445e-11, 1.1653079e-10, 1.488587e-10],
[-6.5837114e-11, -7.9815266e-11, -1.0195578e-10],
[4.5094595e-11, 5.4669158e-11, 6.983214e-11],
[-3.088374e-11, -3.7443826e-11, -4.7830184e-11],
[2.1154634e-11, 2.564704e-11, 3.2763126e-11],
[-1.4487966e-11, -1.7564616e-11, -2.2435387e-11],
[9.9227293e-12, 1.2029489e-11, 1.5365487e-11],
[-6.7963413e-12, -8.2369667e-12, -1.0523138e-11],
[4.6540549e-12, 5.6434857e-12, 7.2084561e-12],
[-3.1867842e-12, -3.8617998e-12, -4.9347193e-12],
[2.1831426e-12, 2.6467717e-12, 3.3821834e-12],
[-1.4939161e-12, -1.811884e-12, -2.3128166e-12],
[1.0231815e-12, 1.2398971e-12, 1.5845103e-12],
[-6.9988459e-13, -8.4732221e-13, -1.080025e-12],
[4.7961635e-13, 5.8086869e-13, 7.4251716e-13]]
if __name__ == '__main__':
residual = np.array(residual);
size = residual.shape[0]
residual_norm = np.zeros(size, dtype=float)
for i in range(size):
residual_norm[i] = np.linalg.norm(residual[i])
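    # Note (added for clarity): np.linalg.norm with default arguments returns
    # the Euclidean 2-norm, i.e. sqrt(sum(residual[i]**2)), which is what the
    # plot label below refers to.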
    plt.title("Jacobi method: residual norm per iteration")
x = np.arange(size)
    plt.plot(x, residual_norm, color='green', label="sqrt(sum of squared residuals)")
plt.xlabel("repeats")
plt.ylabel("residual(2-norm)")
plt.yscale('log')
plt.legend()
plt.show() | mit |
gpfreitas/bokeh | bokeh/compat/mplexporter/exporter.py | 32 | 12403 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
        Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
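    # Illustrative sketch (assumed figure/axes, not part of the original
    # source): for a line drawn in data coordinates, process_transform reports
    # "data" and returns the points unchanged, e.g.
    #
    #     # fig, ax = plt.subplots()
    #     # line, = ax.plot([0, 1], [0, 1])
    #     # code, xy = Exporter.process_transform(line.get_transform(), ax,
    #     #                                       line.get_xydata())
    #     # code == 'data'; xy equals line.get_xydata()
    #
    # a transform built on ax.transAxes would instead be reported as "axes".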
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
DanielWinklehner/dans_pymodules | dans_pymodules/read_igun.py | 1 | 12187 | from optparse import OptionParser
# from matplotlib import pyplot as plt
import numpy as np
from scipy import constants
from random import random
import os.path
import sys
__doc__ = "Rewritten version of the original ReadIGUN from 2010. This is now compatible with the latest " \
"version of IGUN (2016)."
__author__ = "Daniel Winklehner, Alberto Lemut"
def read_igun(filename, npart=5000):
"""
Legacy function for backwards compatibility
:param filename:
:param npart:
:return:
"""
ir = IgunReader()
ir.read_trj(filename)
e_rrp, e_xxp = ir.get_emittance()
print("rrp: {} mm-mrad".format(e_rrp))
print("xxp: {} mm-mrad".format(e_xxp))
return ir.generate_dist(npart=npart)
ReadIGUN = read_igun
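# Hypothetical usage sketch (the file name is an assumption, not from the
# source):
#
#     # ir = IgunReader()
#     # ir.read_trj("example.TRJ", resolution=0.25)
#     # e_rrp, e_xxp = ir.get_emittance()    # mm-mrad per species
#     # dist = ir.generate_dist(npart=5000)
#
# which mirrors what the legacy read_igun() wrapper above does in a single call.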
class IgunReader(object):
"""
This class contains the necessary functions to read IGUN .TRJ files and return a distribution of
randomly generated particles to match the IGUN trajectories.
"""
def __init__(self):
"""
Constructor
"""
self.filename = None # path and filename of trj file
self.run_label = None # Unique string at beginning of trj file
self.data = None # Structured numpy array containing the input data
self.ns = 0 # Number of species
self.legacy = False # Flag for legacy file handling
def read_trj(self,
filename=None,
resolution=0.25 # polygon units --> mm
):
"""
Function that reads in the values from TRJ file
:param filename:
:param resolution:
:return:
"""
if filename is None:
return None
rest, ext = os.path.splitext(filename) # extract basename (incl. path) and extension
if ext not in [".trj", ".TRJ", ".Trj"]:
return None
self.filename = filename
with open(filename) as infile:
self.run_label = infile.readline()
raw_data = infile.readlines()
# Flag for legacy file handling
if "BETA" in raw_data[-1]:
self.legacy = False
else:
self.legacy = True
raw_data.pop(-1) # Delete the last row, it contains the column headers
mydtype = [("ray", float), # Ray number
("group", float), # Ray group
("q", float), # Ray charge (e)
("m", float), # Ray mass (amu)
("rho", float), # Ray R (in polygon units!)
("zeta", float), # Ray Z (in polygon units!)
("energy", float), # Ray kinetic energy per charge state (i.e. source voltage eV)
("atandrdz", float), # Ray R' (rad)
("i", float), # Ray current (A)
("atandtdz", float), # Ray Theta'
("phi", float), # Ray Theta (rad)
("beta", float)] # Ray beta (relativistic parameter)
data = []
if self.legacy:
for line in raw_data:
data_ = [float(item) for item in line.split()]
# Old IGUN .TRJ files didn't have the BETA column...
# Calculate gamma from energy and mass (Ekin = m0c^2 * (gamma - 1)).
# Cave: Energy is given as source voltage. i.e. needs to be multiplied with charge state.
gamma = data_[2] * data_[6] / data_[3] / 931500000.0 + 1.0
# Calculate beta from gamma and append to data
data_.append(np.sqrt(1.0 - gamma ** -2.0))
data.append(tuple(data_))
self.data = np.array(data, dtype=mydtype)
# noinspection PyTypeChecker
self.data["i"] *= -6.2832e-6 # Legacy currents were given as uA/6.2832 and pos. ions had negative currents
else:
for line in raw_data:
data.append(tuple([float(item) for item in line.split()]))
self.data = np.array(data, dtype=mydtype)
# noinspection PyTypeChecker
self.data["i"] *= -1.0 # Positive currents are given as negative in IGUN
self.ns = len(np.unique(self.data["group"]))
# noinspection PyTypeChecker
self.data["zeta"] *= resolution # Polygon units --> mm
# noinspection PyTypeChecker
self.data["rho"] *= resolution # Polygon units --> mm
return data
def get_emittance(self):
groups = np.array(np.unique(self.data["group"]))
e_rrp = []
e_xxp = []
for species in groups: # process each species
data = self.data[np.where(self.data["group"] == species)] # Select subset of data
# species = int(species) - 1 # So we can use it as an index
r = data["rho"] # (mm)
rp = data["atandrdz"] * 1000.0 # (rad --> mrad)
currents = np.array(data["i"]) # (A)
currentsum = sum(currents)
e_rrp.append(np.sqrt(
sum(currents * r ** 2.0) * sum(currents * rp ** 2.0) - sum(currents * r * rp) ** 2.0) / currentsum)
e_xxp.append(np.sqrt(0.5 *
sum(currents * r ** 2.0) * sum(currents * rp ** 2.0)
- sum(currents * r * rp) ** 2.0) / currentsum)
return np.array(e_rrp), np.array(e_xxp)
def generate_dist(self, npart=5000):
"""
Uses the loaded data to generate a random particle distribution corresponding to the trajectory info.
:param npart: Number of particles to generate per species.
:return:
"""
groups = np.array(np.unique(self.data["group"]))
x = np.zeros((self.ns, npart), 'd')
y = np.zeros((self.ns, npart), 'd')
xp = np.zeros((self.ns, npart), 'd')
yp = np.zeros((self.ns, npart), 'd')
z = np.zeros((self.ns, npart), 'd')
vx = np.zeros((self.ns, npart), 'd')
vy = np.zeros((self.ns, npart), 'd')
vz = np.zeros((self.ns, npart), 'd')
currentsum = np.zeros(self.ns, 'd')
pps = []
mass = []
charge = []
for species in groups: # process each species
data = self.data[np.where(self.data["group"] == species)] # Select subset of data
species = int(species) - 1 # So we can use it as an index
numpart = len(data)
pps.append(numpart)
mass.append(data["m"][0])
charge.append(data["q"][0])
currentsum[species] = sum(data["i"])
cumulative = np.zeros(numpart + 1, 'd')
for k in range(numpart):
cumulative[k + 1] = cumulative[k] + data["i"][k] / currentsum[species]
# indices = []
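            # Inverse-transform sampling (descriptive note): draw a uniform random
            # number and bisect the normalized cumulative-current array so that
            # trajectory index jmid is picked with probability proportional to its current.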
for k in range(npart):
probability = random() # get random number
jmin = 0
jmid = int(numpart / 2)
jmax = numpart
for dummy in range(200):
if cumulative[jmin] <= probability <= cumulative[jmid]:
if jmin + 1 == jmid:
jmid = jmin
break
jmax = jmid
jmid = int((jmin + jmax) / 2)
elif cumulative[jmid] <= probability <= cumulative[jmax]:
if jmid + 1 == jmax:
break
jmin = jmid
jmid = int((jmin + jmax) / 2)
else:
print("{}: probability {} of out boundaries cumulative[{}] = {} - cumulative[{}] = {}\n".format(
os.path.split(sys.argv[0])[1], probability, jmin,
cumulative[jmin], jmax, cumulative[jmax]))
jmid -= 1
theta = 2.0 * np.pi * random()
velocity = data["beta"][jmid] * constants.c
x[species, k] = data["rho"][jmid] * np.cos(theta) # (mm)
y[species, k] = data["rho"][jmid] * np.sin(theta) # (mm)
z[species, k] = data["zeta"][0] # (mm)
xp[species, k] = (data["atandrdz"][jmid] * np.cos(theta) - data["atandtdz"][jmid] * np.sin(theta))
yp[species, k] = (data["atandrdz"][jmid] * np.sin(theta) + data["atandtdz"][jmid] * np.cos(theta))
vz[species, k] = velocity / np.sqrt(xp[species, k] ** 2 + yp[species, k] ** 2 + 1) # (m/s)
vx[species, k] = xp[species, k] * vz[species, k] # (m/s)
vy[species, k] = yp[species, k] * vz[species, k] # (m/s)
# Calculate some handy additional output values
vzmean = vz.mean(axis=1) # Calculate mean vz for each species (m/s)
xmax = x.max(axis=1)
ymax = y.max(axis=1)
xenv = np.zeros(self.ns, 'd')
yenv = np.zeros(self.ns, 'd')
for k in range(self.ns):
maxid = np.where(x[k, :] == xmax[k])
xenv[k] = xp[k, maxid[0]] # rad
maxid = np.where(y[k, :] == ymax[k])
yenv[k] = yp[k, maxid[0]] # rad
results = {"value": 0,
"ns": self.ns,
"np": np.ones(self.ns, 'd') * npart,
"pps": np.array(pps),
"M": np.array(mass),
"Q": np.array(charge),
"totalCurrent": currentsum * 1000000.0, # Postprocessor expects current in uA
"x": x,
"y": y,
"z": z,
"xp": xp * 1000, # mrad
"yp": yp * 1000, # mrad
"vx": vx,
"vy": vy,
"vz": vz,
"vzmean": vzmean,
"xmax": xmax,
"ymax": ymax,
"xenv": xenv,
"yenv": yenv}
return results
# This part is only executed if ReadIGUN.py is called on its own (e.g. from the command line)
if __name__ == '__main__':
# --- Option parser for command-line options --- #
parser = OptionParser()
parser.add_option("-i", "--infile", dest="ipf", help="Specify input file (*.TRJ)", metavar="IFILE")
parser.add_option("-o", "--outfile", dest="opf", help="Specify output file", metavar="OFILE")
parser.add_option("-p", "--particles", dest="nparticles", type="int", help="Number of particles to be generated",
metavar="NP")
(options, args) = parser.parse_args()
# Set variables according to command-line options:
if options.ipf is not None:
ipf = options.ipf
else:
ipf = None
if options.nparticles is not None:
nparticles = options.nparticles
else:
nparticles = 5000
if options.opf is not None:
opf = options.opf
else:
opf = None
if ipf is None or opf is None:
print("Error: Either inputfile or outputfile not specified!")
print("Usage: 'ReadIGUN.py -i <INPUT FILE> -o <OUTPUT FILE> [-p <# of particles to calculate>]'")
raise SystemExit
# Call the main script
igun_reader = IgunReader()
igun_reader.read_trj(filename=ipf)
res = igun_reader.generate_dist(npart=nparticles)
# --- write results to file --- #
for j in range(res["ns"]):
        outpath = os.path.splitext(opf)[0] + "_species" + str(j + 1) + ".dat"  # each species gets its own file
print("Output file {} written\n".format(outpath))
f = open(outpath, 'w')
f.write("Original file: %s\n" % (os.path.split(sys.argv[0])[1]))
f.write("M = %i amu\n" % (res["M"][j]))
f.write("Q = %i e\n" % (res["Q"][j]))
f.write("I = %f euA\n" % (res["totalCurrent"][j]))
f.write(
"x (mm) y (mm) z (mm) xp (mrad) yp (mrad) vx (m/s) vy (m/s) vz (m/s)\n")
for i in range(res["np"][j]):
f.write("%e %e %e %e %e %e %e %e\n" % (
res["x"][j][i], res["y"][j][i], res["z"][j][i],
res["xp"][j][i] * 1000, res["yp"][j][i] * 1000, res["vx"][j][i], res["vy"][j][i],
res["vz"][j][i]))
f.close()
| mit |
mattjj/pyhsmm-collapsedinfinite | models.py | 1 | 5967 | from __future__ import division
import numpy as np
na = np.newaxis
from matplotlib import pyplot as plt
from matplotlib import cm
import abc
from collections import defaultdict
from pybasicbayes.abstractions import ModelGibbsSampling
from internals import transitions, states
class Collapsed(ModelGibbsSampling):
__metaclass__ = abc.ABCMeta
def resample_model(self):
for s in self.states_list:
s.resample()
self.beta.resample()
# statistic gathering methods
def _counts_from(self,k):
# returns an integer
return sum(s._counts_from(k) for s in self.states_list)
def _counts_to(self,k):
# returns an integer
return sum(s._counts_to(k) for s in self.states_list)
def _counts_fromto(self,k1,k2):
# returns an integer
return sum(s._counts_fromto(k1,k2) for s in self.states_list)
def _initial_counts(self,k):
return sum(s.stateseq[0] == k for s in self.states_list)
def _data_withlabel(self,k):
# returns a list of (masked) arrays
return [s._data_withlabel(k) for s in self.states_list]
def _occupied(self):
# returns a set
return reduce(set.union,(s._occupied() for s in self.states_list),set([]))
### optional methods
def plot(self,color=None):
import itertools
num_states = len(self._occupied())
state_colors = {}
idx = 0
for state in itertools.chain(*[s.stateseq for s in self.states_list]):
if state not in state_colors:
state_colors[state] = idx/(num_states-1) if color is None else color
idx += 1
cmap = cm.get_cmap()
for s in self.states_list:
plt.figure()
### obs stuff
# plt.subplot(2,1,1)
# for state in rle(s.stateseq)[0]:
# self.obs.plot(color=cmap(state_colors[state]),
# data=s.data[s.stateseq==state] if s.data is not None else None,
# plot_params=False)
# plt.subplot(2,1,2)
### states stuff
s.plot(colors_dict=state_colors)
class collapsed_stickyhdphmm(Collapsed):
def __init__(self,gamma_0,alpha_0,kappa,obs):
self.gamma_0 = gamma_0
self.alpha_0 = alpha_0
self.kappa = kappa
self.obs = obs
self.beta = transitions.beta(model=self,gamma_0=gamma_0)
self.states_list = []
def add_data(self,data):
self.states_list.append(states.collapsed_stickyhdphmm_states(
model=self,beta=self.beta,alpha_0=self.alpha_0,
kappa=self.kappa,obs=self.obs,data=data))
def generate(self,T,keep=True):
# TODO only works if there's no other data in the model; o/w need to add
# existing data to obs resample. it should be an easy update.
assert len(self.states_list) == 0
tempstates = states.collapsed_stickyhdphmm_states(
T=T,model=self,beta=self.beta,alpha_0=self.alpha_0,
kappa=self.kappa,obs=self.obs)
used_states = np.bincount(tempstates.stateseq)
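        # Descriptive note: for each state appearing in the sampled sequence,
        # resample the observation parameters once, then draw one observation
        # per occurrence of that state.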
allobs = []
for state, count in enumerate(used_states):
self.obs.resample()
allobs.append([self.obs.rvs(1) for itr in range(count)])
obs = []
for state in tempstates.stateseq:
obs.append(allobs[state].pop())
obs = np.concatenate(obs)
if keep:
tempstates.data = obs
self.states_list.append(tempstates)
return obs, tempstates.stateseq
class collapsed_hdphsmm(Collapsed):
def __init__(self,gamma_0,alpha_0,obs,dur):
self.gamma_0 = gamma_0
self.alpha_0 = alpha_0
self.obs = obs
self.dur = dur
self.beta = transitions.censored_beta(model=self,gamma_0=gamma_0,alpha_0=alpha_0)
self.states_list = []
def add_data(self,data,stateseq=None):
self.states_list.append(states.collapsed_hdphsmm_states(
model=self,beta=self.beta,alpha_0=self.alpha_0,
obs=self.obs,dur=self.dur,data=data,stateseq=stateseq))
def _durs_withlabel(self,k):
# returns a list of (masked) arrays
return [s._durs_withlabel(k) for s in self.states_list]
def generate(self,T,keep=True):
# TODO only works if there's no other data in the model
assert len(self.states_list) == 0
tempstates = states.collapsed_hdphsmm_states(
T=T,model=self,beta=self.beta,alpha_0=self.alpha_0,
obs=self.obs,dur=self.dur)
used_states = defaultdict(lambda: 0)
for state in tempstates.stateseq:
used_states[state] += 1
allobs = {}
for state,count in used_states.items():
self.obs.resample()
allobs[state] = [self.obs.rvs(1) for itr in range(count)]
obs = []
for state in tempstates.stateseq:
obs.append(allobs[state].pop())
obs = np.concatenate(obs)
if keep:
tempstates.data = obs
self.states_list.append(tempstates)
return obs, tempstates.stateseq
def resample_model_labels(self):
for s in self.states_list:
s.resample_label_version()
self.beta.resample()
class collapsed_hdphsmm_sameashdphmm(collapsed_hdphsmm):
def __init__(self,gamma_0,alpha_0,obs,dur,kappa):
super(collapsed_hdphsmm_sameashdphmm,self).__init__(gamma_0,alpha_0,obs,dur)
self.beta = transitions.censored_beta_sameashdphmm(model=self,gamma_0=gamma_0,alpha_0=alpha_0)
self.kappa = kappa
def add_data(self,data,stateseq=None):
self.states_list.append(states.collapsed_hdphsmm_states_sameashdphmm(
model=self,beta=self.beta,alpha_0=self.alpha_0,kappa=self.kappa,
obs=self.obs,dur=self.dur,data=data,stateseq=stateseq))
# TODO methods to convert to/from weak limit representations
| mit |
aayushidwivedi01/spark-tk | regression-tests/sparktkregtests/testcases/dicom/save_load_dicom_test.py | 13 | 4193 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.save and dicom.load functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import dicom
import numpy
import datetime
class SaveLoadDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(SaveLoadDicomTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
# generate a unique name to save the dicom object under
self.location = self.get_file(self.get_name("save_load_test"))
def test_basic_save_load_content_test(self):
"""basic save load content test"""
# save the current dicom object under a unique name
self.dicom.save(self.location)
# load the dicom
load_dicom = self.context.load(self.location)
original_metadata = self.dicom.metadata.to_pandas()["metadata"]
load_metadata = load_dicom.metadata.to_pandas()["metadata"]
# compare the loaded dicom object with the dicom object we created
        for (original_row, load_row) in zip(original_metadata, load_metadata):
original_file = original_row.encode("ascii", "ignore")
# extract and remove bulk data element from metadata since we don't care about it
# bulk data records the file's location, so it may differ
loaded_file = load_row.encode("ascii", "ignore")
bulk_data_index = original_file.index("<BulkData")
load_bulk_data = loaded_file[bulk_data_index:bulk_data_index + loaded_file[bulk_data_index:].index(">") + 1]
original_bulk_data = original_file[bulk_data_index:bulk_data_index + original_file[bulk_data_index:].index(">") + 1]
loaded_file = loaded_file.replace(load_bulk_data, "")
original_file = original_file.replace(original_bulk_data, "")
self.assertEqual(loaded_file, original_file)
# now we check that the pixel data matches
original_image = self.dicom.pixeldata.to_pandas()
loaded_image = load_dicom.pixeldata.to_pandas()
for (dcm_image, pixel_image) in zip(original_image["imagematrix"], loaded_image["imagematrix"]):
numpy.testing.assert_equal(pixel_image, dcm_image)
def test_save_invalid_long_unicode_name(self):
"""save under a long unicode name, should fail"""
# we will pass the dicom metadata itself as the name
metadata_unicode = self.dicom.metadata.to_pandas()["metadata"]
with self.assertRaisesRegexp(Exception, "does not exist"):
self.dicom.save(metadata_unicode)
def test_load_does_not_exist(self):
"""test load dicom does not exist"""
with self.assertRaisesRegexp(Exception, "Input path does not exist"):
self.context.load("does_not_exist")
def test_save_invalid_path_type(self):
"""test save dicom invalid path type"""
with self.assertRaisesRegexp(Exception, "does not exist"):
self.dicom.save(1)
def test_save_name_already_exists(self):
"""test save dicom duplicate name"""
with self.assertRaisesRegexp(Exception, "already exists"):
self.dicom.save("duplicate_name")
self.dicom.save("duplicate_name")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
IndraVikas/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
mschmittfull/nbodykit | nbodykit/core/datasource/RaDecRedshift.py | 1 | 7223 | from nbodykit.core import DataSource
from nbodykit.utils import selectionlanguage
import numpy
class RaDecRedshiftDataSource(DataSource):
"""
DataSource designed to handle reading (ra, dec, redshift)
from a plaintext file, using `pandas.read_csv`
* Returns the Cartesian coordinates corresponding to
(ra, dec, redshift) as the `Position` column.
* If `unit_sphere = True`, the Cartesian coordinates
      are on the unit sphere, so the redshift information
is not used
"""
plugin_name = "RaDecRedshift"
def __init__(self, path, names, unit_sphere=False,
usecols=None, sky_cols=['ra','dec'], z_col='z',
weight_col=None, degrees=False, select=None, nbar_col=None, colmap={}):
# positional arguments
self.path = path
self.names = names
# keywords
self.unit_sphere = unit_sphere
self.usecols = usecols
self.sky_cols = sky_cols
self.z_col = z_col
self.weight_col = weight_col
self.degrees = degrees
self.select = select
self.nbar_col = nbar_col
self.colmap = colmap
# setup the cosmology
if not self.unit_sphere:
if self.cosmo is None:
raise ValueError("please specify a input Cosmology to use in `RaDecRedshift`")
# sample the cosmology's comoving distance
self.cosmo.sample('comoving_distance', numpy.logspace(-5, 1, 1024))
else:
# unit sphere fits in box of size L = 2
self.BoxSize = numpy.array([2., 2., 2.])
@classmethod
def fill_schema(cls):
s = cls.schema
s.description = "read (ra, dec, z) from a plaintext file, returning Cartesian coordinates"
# required
s.add_argument("path", type=str, help="the file path to load the data from")
s.add_argument("names", type=str, nargs='+', help="the names of columns in text file")
# optional
s.add_argument('unit_sphere', type=bool,
help='if True, return Cartesian coordinates on the unit sphere')
s.add_argument("usecols", type=str, nargs='*',
help="only read these columns from file")
s.add_argument("sky_cols", type=str, nargs='*',
help="names of the columns specifying the sky coordinates")
s.add_argument("z_col", type=str,
help="name of the column specifying the redshift coordinate")
s.add_argument("weight_col", type=str,
help="name of the column specifying the `weight` for each object")
s.add_argument("nbar_col", type=str,
help="name of the column specifying the `nbar` value for each object")
s.add_argument('degrees', type=bool,
help='set this flag if the input (ra, dec) are in degrees')
s.add_argument("select", type=selectionlanguage.Query,
help='row selection based on conditions specified as string')
s.add_argument('colmap', type=dict,
help='dictionary that maps input columns to output columns')
def _to_cartesian(self, coords):
"""
Convert the (ra, dec, redshift) coordinates to cartesian coordinates,
scaled to the comoving distance if `unit_sphere = False`, else
on the unit sphere
Notes
-----
Input angles `ra` and `dec` (first 2 columns of `coords`)
are assumed to be in radians
Parameters
-----------
coords : array_like, (N, 3)
the input coordinates with the columns giving (ra, dec, redshift),
where ``ra`` and ``dec`` are in radians
Returns
-------
pos : array_like, (N,3)
the cartesian position coordinates, where columns represent ``x``,
``y``, and ``z``
"""
ra, dec, redshift = coords.T
x = numpy.cos( dec ) * numpy.cos( ra )
y = numpy.cos( dec ) * numpy.sin( ra )
z = numpy.sin( dec )
pos = numpy.vstack([x,y,z])
if not self.unit_sphere:
pos *= self.cosmo.comoving_distance(redshift)
return pos.T
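    # Quick sanity check (illustrative note, not part of the original source):
    # with unit_sphere=True, (ra, dec) = (0, 0) maps to (1, 0, 0) and
    # (ra, dec) = (pi/2, 0) maps to (0, 1, 0).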
def readall(self):
"""
Read all available data, returning a dictionary
This provides the following columns:
``Ra`` : right ascension (in radians)
``Dec`` : declination (in radians)
``Redshift`` : the redshift
``Position`` : cartesian coordinates computed from angular coords + redshift
And optionally, the `Weight` and `Nbar` columns
"""
try:
import pandas as pd
except:
name = self.__class__.__name__
raise ImportError("pandas must be installed to use %s" %name)
# read in the plain text file using pandas
kwargs = {}
kwargs['comment'] = '#'
kwargs['names'] = self.names
kwargs['header'] = None
kwargs['engine'] = 'c'
kwargs['delim_whitespace'] = True
kwargs['usecols'] = self.usecols
        # parse the full catalog in one pass
data = pd.read_csv(self.path, **kwargs)
# create structured array to hold data
dtype = [('Position', ('f4', 3))]
dtype += [('Ra', 'f4'), ('Dec', 'f4'), ('Redshift', 'f4')]
if self.weight_col is not None:
dtype += [('Weight', 'f4')]
if self.nbar_col is not None:
dtype += [('Nbar', 'f4')]
for in_name, out_name in self.colmap.items():
if in_name not in self.names:
raise ValueError("invalid column re-mapping: '%s' not in input catalog" %in_name)
dtype += [(out_name, data[in_name].dtype.str)]
dtype = numpy.dtype(dtype)
new = numpy.empty(len(data), dtype=dtype)
# rescale the angles
if self.degrees:
data[self.sky_cols] *= numpy.pi/180.
# get the (ra, dec, z) coords
cols = self.sky_cols + [self.z_col]
pos = data[cols].values.astype('f4')
new['Ra'] = pos[:,0]
new['Dec'] = pos[:,1]
new['Redshift'] = pos[:,2]
new['Position'] = self._to_cartesian(pos)
# optionally, return a weight
if self.weight_col is not None:
new['Weight'] = data[self.weight_col].values.astype('f4')
# optionally, return nbar
if self.nbar_col is not None:
new['Nbar'] = data[self.nbar_col].values.astype('f4')
# copy any extra columns from the column map
for in_name, out_name in self.colmap.items():
new[out_name] = data[in_name].values.copy()
# select based on input conditions
if self.select is not None:
mask = self.select.get_mask(new)
new = new[mask]
toret = {}
for name in new.dtype.names:
toret[name] = new[name].copy()
return toret
| gpl-3.0 |
arunchaganty/kbp-online | doc/emnlp2017/figures/mention-histogram.py | 1 | 1247 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
"""
import csv
import sys
import matplotlib.pyplot as plt
from matplotlib import rc
from collections import defaultdict
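# Expected input format (an assumption inferred from the reader below): one
# tab-separated "<value>\t<unit>" pair per line, e.g.
#   0.05    money
#   12      people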
def do_command(args):
# Read data
rc('text', usetex=True)
#rc('font', size=22)
data = defaultdict(list)
for value, unit in csv.reader(args.input, delimiter='\t'):
data[unit].append(float(value))
# Set up plotter.
plt.ylabel('Number of mentions', fontsize=22)
plt.xlabel('Mention value', fontsize=22)
plt.xscale('log')
bins = [10**i for i in range(-2,11)]
plt.hist(data.values(), bins, stacked=True, label=data.keys())
plt.legend()
plt.savefig(args.output)
#plt.show()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser( description='' )
parser.add_argument('--input', type=argparse.FileType('r'), default=sys.stdin, help="")
parser.add_argument('--output', type=str, default='mention-histogram.pdf', help="")
parser.set_defaults(func=do_command)
#subparsers = parser.add_subparsers()
#command_parser = subparsers.add_parser('command', help='' )
#command_parser.set_defaults(func=do_command)
ARGS = parser.parse_args()
ARGS.func(ARGS)
| mit |
scienceopen/gridaurora | gridaurora/loadtranscargrid.py | 1 | 2538 | """
load and plot transcar energy grid
Egrid is not what is used externally by other programs; rather, the variable "bins" is.
"""
from pathlib import Path
import xarray
import numpy as np
from scipy.stats import linregress
from matplotlib.pyplot import figure
flux0 = 70114000000.0
Nold = 33
Nnew = 81 # 100MeV
def loadregress(fn: Path):
# %%
Egrid = np.loadtxt(Path(fn).expanduser(), delimiter=",")
# Ematt = asarray([logspace(1.7220248253079387,4.2082263059355824,num=Nold,base=10),
# #[logspace(3.9651086925197356,9.689799159992674,num=33,base=exp(1)),
# logspace(1.8031633895706722,4.2851520785250914,num=Nold,base=10)]).T
# %% log-lin regression
Enew = np.empty((Nnew, 4))
Enew[:Nold, :] = Egrid
for k in range(4):
s, i = linregress(range(Nold), np.log10(Egrid[:, k]))[:2]
Enew[Nold:, k] = 10 ** (np.arange(Nold, Nnew) * s + i)
return Enew
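# Descriptive note on loadregress: each of the four grid columns is fit as
# log10(E) ~ slope * index + intercept over the original Nold = 33 bins, and the
# fit is evaluated at indices 33..80 to extend the grid to Nnew = 81 bins.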
def doplot(fn: Path, bins: xarray.DataArray, Egrid: np.ndarray = None, debug: bool = False):
# %% main plot
ax = figure().gca()
ax.bar(
left=bins.loc[:, "low"], height=bins.loc[:, "flux"], width=bins.loc[:, "high"] - bins.loc[:, "low"],
)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel("flux [s$^{-1}$ sr$^{-1}$ cm$^{-2}$ eV$^{-1}$]")
ax.set_xlabel("bin energy [eV]")
ax.set_title(f"Input flux used to generate eigenprofiles, based on {fn}")
# %% debug plots
if debug:
ax = figure().gca()
bins[["low", "high"]].plot(logy=True, ax=ax, marker=".")
ax.set_xlabel("bin number")
ax.set_ylabel("bin energy [eV]")
ax = figure().gca()
bins["flux"].plot(logy=True, ax=ax, marker=".")
ax.set_xlabel("bin number")
ax.set_ylabel("flux [s$^{-1}$ sr$^{-1}$ cm$^{-2}$ eV$^{-1}$]")
if Egrid is not None:
ax = figure().gca()
ax.plot(Egrid, marker=".")
# ax.plot(Ematt,marker='.',color='k')
ax.set_yscale("log")
ax.set_ylabel("eV")
ax.legend(["E1", "E2", "pr1", "pr2"], loc="best")
def makebin(Egrid: np.ndarray):
E1 = Egrid[:, 0]
E2 = Egrid[:, 1]
pr1 = Egrid[:, 2]
pr2 = Egrid[:, 3]
dE = E2 - E1
Esum = E2 + E1
flux = flux0 / 0.5 / Esum / dE
Elow = E1 - 0.5 * (E1 - pr1)
Ehigh = E2 - 0.5 * (E2 - pr2)
E = np.column_stack((Elow, Ehigh, flux))
Ed = xarray.DataArray(data=E, dims=["energy", "type"])
Ed["type"] = ["low", "high", "flux"]
return Ed
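# Illustrative usage sketch (the file name below is a placeholder; assumes a
# comma-separated grid file with four energy columns):
#
#     Enew = loadregress("~/transcar_Egrid.csv")
#     bins = makebin(Enew)
#     doplot("~/transcar_Egrid.csv", bins, Egrid=Enew)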
| gpl-3.0 |
DamCB/tyssue | tests/generation/test_generation.py | 2 | 4813 | from scipy.spatial import Voronoi
from tyssue import generation
from tyssue.core.sheet import Sheet
from tyssue.generation import hexa_grid3d, from_3d_voronoi
from tyssue.generation import hexa_grid2d, from_2d_voronoi
from tyssue.generation import extrude, subdivide_faces
from tyssue import Epithelium, BulkGeometry, config
from tyssue.core.sheet import get_opposite
from pytest import raises
import numpy as np
import pandas as pd
def test_3faces():
datasets, _ = generation.three_faces_sheet()
assert datasets["edge"].shape[0] == 18
assert datasets["face"].shape[0] == 3
assert datasets["vert"].shape[0] == 13
def test_from_3d_voronoi():
grid = hexa_grid3d(6, 4, 3)
datasets = from_3d_voronoi(Voronoi(grid))
assert datasets["vert"].shape[0] == 139
assert datasets["edge"].shape[0] == 1272
assert datasets["face"].shape[0] == 282
assert datasets["cell"].shape[0] == 70
bulk = Epithelium("bulk", datasets, config.geometry.bulk_spec())
bulk.reset_index()
bulk.reset_topo()
BulkGeometry.update_all(bulk)
bulk.sanitize()
# GH 137
assert (
bulk.edge_df.groupby("face").apply(lambda df: df["cell"].unique().size).max()
== 1
)
assert bulk.validate()
def test_from_2d_voronoi():
grid = hexa_grid2d(6, 4, 1, 1)
datasets = from_2d_voronoi(Voronoi(grid))
assert datasets["vert"].shape[0] == 32
assert datasets["edge"].shape[0] == 82
assert datasets["face"].shape[0] == 24
def test_extrude():
datasets, specs = generation.three_faces_sheet()
sheet = Sheet("test", datasets, specs)
extruded = extrude(sheet.datasets, method="translation")
assert extruded["cell"].shape[0] == 3
assert extruded["face"].shape[0] == 24
assert extruded["edge"].shape[0] == 108
assert extruded["vert"].shape[0] == 26
def test_subdivide():
datasets, specs = generation.three_faces_sheet()
sheet = Sheet("test", datasets, specs)
subdivided = subdivide_faces(sheet, [0])
assert subdivided["face"].shape[0] == 3
assert subdivided["edge"].shape[0] == 30
assert subdivided["vert"].shape[0] == 14
datasets_3d = extrude(datasets, method="translation")
sheet_3d = Sheet("test3d", datasets_3d, specs)
subdivided_3d = subdivide_faces(sheet_3d, [0])
assert subdivided_3d["face"].shape[0] == 24
assert subdivided_3d["edge"].shape[0] == 120
assert subdivided_3d["vert"].shape[0] == 27
assert subdivided_3d["cell"].shape[0] == 3
def test_extrude_invalid_method():
datasets, specs = generation.three_faces_sheet()
with raises(ValueError):
        extrude(datasets, method="invalid_method")
def test_hexagrid3d_noise():
np.random.seed(1)
grid = hexa_grid3d(6, 4, 3, noise=0.1)
datasets = from_3d_voronoi(Voronoi(grid))
assert datasets["vert"].shape[0] == 318
assert datasets["edge"].shape[0] == 3300
assert datasets["face"].shape[0] == 670
assert datasets["cell"].shape[0] == 72
def test_anchors():
datasets, specs = generation.three_faces_sheet()
sheet = Sheet("test_anchors", datasets, specs)
sheet.edge_df["opposite"] = get_opposite(sheet.edge_df)
expected_dict = {
18: [1, 13],
19: [2, 14],
20: [3, 15],
21: [4, 16],
22: [5, 17],
23: [6, 18],
24: [7, 19],
25: [8, 20],
26: [9, 21],
27: [10, 22],
28: [11, 23],
29: [12, 24],
}
expected_res = pd.DataFrame.from_dict(expected_dict, orient="index")
expected_res.columns = ["srce", "trgt"]
generation.create_anchors(sheet)
res_srce_trgt_anchors = sheet.edge_df.loc[18:, ["srce", "trgt"]]
assert res_srce_trgt_anchors.equals(expected_res)
def test_extract():
datasets, specs = generation.three_faces_sheet()
sheet = Sheet("test_sheet_extract_coordinate", datasets, specs)
sheet.face_df.loc[0, "is_alive"] = 0
subsheet = sheet.extract("is_alive")
assert subsheet.face_df["is_alive"].all()
assert subsheet.Nf == 2
def test_sheet_extract_coordinate():
grid = hexa_grid2d(6, 4, 3, 3)
datasets = from_2d_voronoi(Voronoi(grid))
sheet = Sheet("test_extract_bounding_box", datasets)
subsheet = sheet.extract_bounding_box(
[sheet.face_df["x"].min(), sheet.face_df["x"].max() / 2],
[sheet.face_df["y"].min(), sheet.face_df["y"].max() / 2],
)
assert subsheet.face_df["x"].max() <= sheet.face_df["x"].max() / 2
assert subsheet.face_df["x"].min() >= sheet.face_df["x"].min()
assert subsheet.face_df["y"].max() <= sheet.face_df["y"].max() / 2
assert subsheet.face_df["y"].min() >= sheet.face_df["y"].min()
assert subsheet.face_df["z"].max() <= sheet.face_df["z"].max()
assert subsheet.face_df["z"].min() >= sheet.face_df["z"].min()
| gpl-3.0 |
shusenl/scikit-learn/sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
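# Worked example for cohen_kappa_score (illustrative comment, not part of the
# original docstring): for y1 = [0, 0, 1, 1] and y2 = [0, 0, 1, 0] the normalized
# confusion matrix is [[0.5, 0.0], [0.25, 0.25]], so p_observed = 0.75,
# p_expected = 0.75 * 0.5 + 0.25 * 0.5 = 0.5, and kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.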
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
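# Illustrative sketch of the zero-division guard above (a usage note only, not
# part of the public API). A zero denominator yields 0.0 in the result instead
# of nan/inf, alongside an UndefinedMetricWarning:
#
#     >>> import numpy as np
#     >>> num, den = np.array([2.0, 0.0]), np.array([4.0, 0.0])
#     >>> with np.errstate(divide='ignore', invalid='ignore'):
#     ...     _prf_divide(num, den, 'precision', 'predicted', None, ('precision',))
#     array([ 0.5,  0. ])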
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
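# A hand-worked sketch (illustrative only) of the sufficient statistics above
# for the docstring's multiclass example. With y_true = [cat, dog, pig, cat,
# dog, pig] and y_pred = [cat, pig, dog, cat, cat, dog], the counts over the
# sorted labels (cat, dog, pig) are:
#
#     tp_sum   = [2, 0, 0]    # correct predictions per label
#     pred_sum = [3, 2, 1]    # predictions made per label
#     true_sum = [2, 2, 2]    # true instances per label (the support)
#
# giving precision = [2/3, 0, 0], recall = [1, 0, 0] and F1 = [0.8, 0, 0],
# whose unweighted means reproduce the macro results (0.22..., 0.33..., 0.26...).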
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
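# A worked check (illustrative only) of the 'avg / total' line in the
# docstring example above. The averages are support-weighted: with per-class
# precision [0.5, 0, 1], recall [1, 0, 0.67], F1 [0.67, 0, 0.8] and supports
# [1, 1, 3], the weighted precision is (0.5*1 + 0*1 + 1*3) / 5 = 0.70, the
# weighted recall is (1*1 + 0*1 + 0.67*3) / 5 = 0.60 and the weighted F1 is
# (0.67*1 + 0*1 + 0.8*3) / 5 = 0.61, matching the rendered report.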
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes the individual
    labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
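# A minimal by-hand check of the docstring value (illustrative only).
# LabelBinarizer orders the classes alphabetically ('ham' < 'spam'), so the
# probability assigned to the true class of each sample is 0.9, 0.9, 0.8 and
# 0.65, and the loss is the mean negative log of those probabilities:
#
#     >>> import numpy as np
#     >>> np.mean(-np.log([0.9, 0.9, 0.8, 0.65]))
#     0.21616...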
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
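# A minimal by-hand check of the binary docstring example (illustrative only,
# using the rounded decision values shown above): the margins
# y_true * pred_decision are roughly 2.18, 2.36 and 0.09, so only the third
# sample falls inside the margin and contributes to the loss:
#
#     >>> import numpy as np
#     >>> np.mean(np.maximum(0, 1 - np.array([2.18, 2.36, 0.09])))
#     0.30...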
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
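# A minimal by-hand check of the docstring value (illustrative only): the
# Brier score is just the mean squared gap between the predicted probability
# of the positive class and the 0/1 outcome:
#
#     >>> import numpy as np
#     >>> np.mean((np.array([0, 1, 1, 0]) - np.array([0.1, 0.9, 0.8, 0.3])) ** 2)
#     0.037...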
| bsd-3-clause |
jwheatp/eiffelometre | train.py | 1 | 1365 | # imports
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
# import data
# it's a small dataset so we can load it completely
df = pd.read_csv('db_june_dayhourcount', sep=",")
# weather data
df_weather = pd.read_csv('db_june_weather', sep=",")
weather = []
for row in df_weather.iterrows() :
index, data = row
weather.extend(data.tolist())
# insert weather column in dataframe
df["weather"] = weather
# create two variables X (model input) and y (model output) for the model
X = df[["weekday","hour","weather"]].as_matrix()
y = df[["count"]].as_matrix()
y = np.ravel(y)
y = y.astype(float)
# normalize y between 0 and 1 (strictly)
y = (y-min(y))/(max(y)+1-min(y))
# create bins for a discrete frequentation scale
bins_5 = np.array([0,0.2,0.4,0.6,0.8,1])
bins_4 = np.array([0,0.25,0.5,0.75,1])
bins_3 = np.array([0,0.33,0.66,1])
# here we use bins_5
y = np.digitize(y, bins_5)
n = len(y)
# SVM/SVC model
clf = SVC()
# use 5-fold cross-validation to test the model accuracy
kf = KFold(n, n_folds=5, shuffle=True)
scores = []
for train, test in kf:
X_train = [X[i] for i in train]
y_train = [y[i] for i in train]
clf.fit(X_train,y_train)
X_test = [X[i] for i in test]
y_test = [y[i] for i in test]
scores.append(clf.score(X_test,y_test))
# print mean accuracy
print(np.mean(scores))
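# Illustrative usage sketch: the last fitted model can be queried for a new
# (weekday, hour, weather) triple. The values below are hypothetical and
# assume the same encodings as the training data (weekday index, hour of day,
# weather code); swap in real values as needed.
example_input = [[5, 18, 1]]
print(clf.predict(example_input))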
| mit |
gam-ba/aspiradora-analitica | procesamiento_bd.py | 1 | 1565 | import datetime as dt
import numpy as np
import pandas as pd
import re
from string import punctuation
df = pd.read_csv('nombre_de_archivo', sep='\t', encoding='utf-8')
df['time'] = df['date'].str.extract('(\d\d:\d\d:\d\d)') # Clean up the date and time fields with a regex (because the API returns them dirty).
df['date'] = df['date'].str.extract('(\d\d\d\d-\d\d-\d\d)')
df['date'] = pd.to_datetime(df.date).dt.date
df['time'] = pd.to_datetime(df.time).dt.time
df['word_count'] = df.comments_list.apply(lambda x: len(re.findall(r'\w+', x))) # Count the number of words in each comment (a rough "complexity measure").
df['weeks_cat'] = df.date.apply(lambda x: int((x-df.date.min()).days/7+1)) # Convert the date into the week number since the first comment.
df['comments_clean'] = df.comments_list.apply(lambda x: ''.join(c for c in x if c not in punctuation).lower()) # Build a column of "clean" comments: no punctuation, all lowercase (stripping accents would be even better).
df = df[['date', 'time', 'weeks_cat', 'comments_list', 'comments_clean', 'likes', 'word_count']]
df = df.sort_values(by=['date','time'], ascending=[True, True]).reset_index() # Sort the database chronologically, down to the second.
df.to_csv('nombre_de_archivo_procesado', sep='\t', encoding='utf-8', index=False) # Save to a CSV to pick up again in the next stage.
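# Illustrative sketch for the next stage (same placeholder file name,
# separator and encoding as above):
#
#     df2 = pd.read_csv('nombre_de_archivo_procesado', sep='\t', encoding='utf-8', parse_dates=['date'])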
| mit |
Statoil/libres | python/res/enkf/export/summary_observation_collector.py | 2 | 2313 | from pandas import DataFrame, MultiIndex
import numpy
from res.enkf import ErtImplType, EnKFMain, EnkfFs, RealizationStateEnum, EnkfObservationImplementationType
from res.enkf.key_manager import KeyManager
from res.enkf.plot_data import EnsemblePlotData
from ecl.util.util import BoolVector
class SummaryObservationCollector(object):
@staticmethod
def getAllObservationKeys(ert):
"""
@type ert: EnKFMain
@rtype: list of str
"""
key_manager = KeyManager(ert)
return key_manager.summaryKeysWithObservations()
@staticmethod
def loadObservationData(ert, case_name, keys=None):
"""
@type ert: EnKFMain
@type case_name: str
@type keys: list of str
@rtype: DataFrame
"""
fs = ert.getEnkfFsManager().getFileSystem(case_name)
time_map = fs.getTimeMap()
dates = [time_map[index].datetime() for index in range(1, len(time_map))]
summary_keys = SummaryObservationCollector.getAllObservationKeys(ert)
if keys is not None:
            summary_keys = [key for key in keys if key in summary_keys] # ignore keys that don't exist
columns = summary_keys
std_columns = ["STD_%s" % key for key in summary_keys]
df = DataFrame(index=dates, columns=columns + std_columns)
for key in summary_keys:
observation_keys = ert.ensembleConfig().getNode(key).getObservationKeys()
for obs_key in observation_keys:
observations = ert.getObservations()
observation_data = observations[obs_key]
history_length = ert.getHistoryLength()
for index in range(0, history_length):
if observation_data.isActive(index):
obs_time = observations.getObservationTime(index).datetime()
node = observation_data.getNode(index)
value = node.getValue()
std = node.getStandardDeviation()
df[key][obs_time] = value
df["STD_%s" % key][obs_time] = std
return df
@classmethod
def summaryKeyHasObservations(cls, ert, key):
return len(ert.ensembleConfig().getNode(key).getObservationKeys()) > 0
| gpl-3.0 |
DESatAPSU/DAWDs | python/reformat_mags_to_csv_DES.py | 1 | 2771 | #!/usr/bin/env python
# This script takes the DES synthetic photometry calcphot results (fits files) from the WD models and organizes the data in a CSV file.
# This should be run in a directory containing subdirectories of the WDs organized by name. For example:
# jacobs-air-2:DirContainingSubDirectories jacob$ ls
# SSSJ0005-0127 SSSJ0057-4914 SSSJ0206-4159 SSSJ0243p0119 SSSJ0515-3224
# This script is for the 20170515 set, modification is required on lines 17,
import numpy as np
import pandas as pd
import pyfits
import glob
import os
import glob
dirNameList = glob.glob('SSSJ?????????')
for dirName in dirNameList:
print dirName
os.chdir(dirName)
filenameList = sorted(glob.glob('*.fits'))
starnameList = [(x.split('.ccd')[0])[4:] for x in filenameList]
output = [x.split('_') for x in starnameList]
outputFileString = output[0][0] + ".mags.csv"
allcatdf = pd.DataFrame()
for i in range(len(filenameList)):
filename = filenameList[i]
starname = starnameList[i]
hdulist = pyfits.open(filename)
tbdata = hdulist[1].data
hdulist.close()
fnameList = tbdata['OBSMODE'].tolist()
abmagList = tbdata['COUNTRATE'].tolist()
filterList = [ (os.path.split(fname)[1]).split('_')[0] for fname in fnameList ]
ccdList = [ int(((os.path.split(fname)[1]).split('.')[1]).split('ccd')[1]) for fname in fnameList ]
snameList = len(filterList)*[starname]
try:
catdf = pd.DataFrame(np.column_stack([filterList,ccdList,snameList,abmagList]), columns=['BAND', 'CCDNUM', 'STARNAME','ABMAG'])
catdf.CCDNUM = catdf.CCDNUM.astype(int)
catdf.ABMAG = catdf.ABMAG.astype(float)
allcatdf = pd.concat([allcatdf,catdf])
except:
            print 'Failed on %s! Continuing to next filename...' % (filename)
continue
#endfor (i)
allcatdf['STARNAMECCDNUM'] = allcatdf['STARNAME'] + '_xxx_' + allcatdf['CCDNUM'].map(str)
allcatdf.reset_index(drop=True, inplace=True)
allcatdf2 = allcatdf.pivot_table('ABMAG', index='STARNAMECCDNUM', columns=['BAND'], aggfunc=sum)
allcatdf2['STARNAME'] = allcatdf2.index.astype(str)
allcatdf2['STARNAME'] = allcatdf2['STARNAME'].str.split('_xxx_').str.get(0)
allcatdf2['CCDNUM'] = allcatdf2.index.astype(str)
allcatdf2['CCDNUM'] = allcatdf2['CCDNUM'].str.split('_xxx_').str.get(1).astype(int)
cols = ['STARNAME','CCDNUM','u', 'g', 'v', 'r', 'i', 'z', 'y']
allcatdf2 = allcatdf2[cols]
allcatdf2.reset_index(drop=True, inplace=True)
allcatdf2.rename(columns = {'y':'Y', 'v':'VR'}, inplace=True)
allcatdf2.to_csv(outputFileString, index=False)
os.chdir('..')
exit()
| mit |
ssaeger/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
QuLogic/iris | lib/iris/tests/system_test.py | 5 | 3658 | # (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
This system test module is useful to identify if some of the key components required for Iris are available.
The system tests can be run with ``python setup.py test --system-tests``.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import cf_units
import numpy as np
import iris
import iris.fileformats.netcdf as netcdf
import iris.fileformats.pp as pp
import iris.tests as tests
class SystemInitialTest(tests.IrisTest):
def system_test_supported_filetypes(self):
nx, ny = 60, 60
data = np.arange(nx * ny, dtype='>f4').reshape(nx, ny)
laty = np.linspace(0, 59, ny).astype('f8')
lonx = np.linspace(30, 89, nx).astype('f8')
horiz_cs = lambda : iris.coord_systems.GeogCS(6371229)
cm = iris.cube.Cube(data, 'wind_speed', units='m s-1')
cm.add_dim_coord(
iris.coords.DimCoord(laty, 'latitude', units='degrees',
coord_system=horiz_cs()),
0)
cm.add_dim_coord(
iris.coords.DimCoord(lonx, 'longitude', units='degrees',
coord_system=horiz_cs()),
1)
cm.add_aux_coord(iris.coords.AuxCoord(np.array([9], 'i8'),
'forecast_period', units='hours'))
hours_since_epoch = cf_units.Unit('hours since epoch',
cf_units.CALENDAR_GREGORIAN)
cm.add_aux_coord(iris.coords.AuxCoord(np.array([3], 'i8'),
'time', units=hours_since_epoch))
cm.add_aux_coord(iris.coords.AuxCoord(np.array([99], 'i8'),
long_name='pressure', units='Pa'))
filetypes = ('.nc', '.pp')
if tests.GRIB_AVAILABLE:
filetypes += ('.grib2',)
for filetype in filetypes:
saved_tmpfile = iris.util.create_temp_filename(suffix=filetype)
iris.save(cm, saved_tmpfile)
new_cube = iris.load_cube(saved_tmpfile)
self.assertCML(new_cube,
('system',
'supported_filetype_%s.cml' % filetype))
@tests.skip_grib
def system_test_grib_patch(self):
import gribapi
gm = gribapi.grib_new_from_samples("GRIB2")
result = gribapi.grib_get_double(gm, "missingValue")
new_missing_value = 123456.0
gribapi.grib_set_double(gm, "missingValue", new_missing_value)
new_result = gribapi.grib_get_double(gm, "missingValue")
self.assertEqual(new_result, new_missing_value)
def system_test_imports_general(self):
if tests.MPL_AVAILABLE:
import matplotlib
import netCDF4
if __name__ == '__main__':
tests.main()
| gpl-3.0 |
jchodera/assaytools | scripts/xml2png_spectra.py | 1 | 6250 | # This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader
# and makes quick and dirty images of the raw data.
# But with scans and not just singlet reads.
# This script specifically combines four spectrum scripts (AB, CD, EF, GH) into a single dataframe and plot.
# The same procedure can be used to make matrices suitable for analysis using
# matrix = dataframe.values
# Made by Sonya Hanson, with some help from things that worked in xml2png.py and xml2png4scans.py
# Friday, November 18,2015
# Usage: python xml2png4scans-spectra.py *.xml
############ For future to combine with xml2png.py
#
# for i, sect in enumerate(Sections):
# reads = sect.xpath("*/Well")
# parameters = root.xpath(path)[0]
# if reads[0].attrib['Type'] == "Scan":
#
##############
import matplotlib.pyplot as plt
from lxml import etree
import pandas as pd
import matplotlib.cm as cm
import seaborn
import sys
import os
### Define extract function that extracts parameters
def extract(taglist):
result = []
for p in taglist:
print "Attempting to extract tag '%s'..." % p
try:
param = parameters.xpath("*[@Name='" + p + "']")[0]
result.append( p + '=' + param.attrib['Value'])
except:
### tag not found
result.append(None)
return result
### Define an initial set of dataframes, one per each section
large_dataframe0 = pd.DataFrame()
large_dataframe1 = pd.DataFrame()
large_dataframe2 = pd.DataFrame()
large_dataframe3 = pd.DataFrame()
large_dataframe4 = pd.DataFrame()
def process_files(xml_files):
"""
Main entry point.
"""
### Define xml files.
xml_files = sys.argv[1:]
so_many = len(xml_files)
print "****This script is about to make png files for %s xml files. ****" % so_many
for file in xml_files:
### Parse XML file.
root = etree.parse(file)
### Remove extension from xml filename.
file_name = os.path.splitext(file)[0]
### Extract plate type and barcode.
plate = root.xpath("/*/Header/Parameters/Parameter[@Name='Plate']")[0]
plate_type = plate.attrib['Value']
try:
bar = root.xpath("/*/Plate/BC")[0]
barcode = bar.text
except:
barcode = 'no barcode'
### Define Sections.
Sections = root.xpath("/*/Section")
much = len(Sections)
print "****The xml file " + file + " has %s data sections:****" % much
for sect in Sections:
print sect.attrib['Name']
for i, sect in enumerate(Sections):
### Extract Parameters for this section.
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
### Parameters are extracted slightly differently depending on Absorbance or Fluorescence read.
# Attach these to title1, title2, or title3, depending on section which will be the same for all 4 files.
if parameters[0].attrib['Value'] == "Absorbance":
result = extract(["Mode", "Wavelength Start", "Wavelength End", "Wavelength Step Size"])
globals()["title"+str(i)] = '%s, %s, %s, %s' % tuple(result)
else:
result = extract(["Gain", "Excitation Wavelength", "Emission Wavelength", "Part of Plate", "Mode"])
globals()["title"+str(i)] = '%s, %s, %s, \n %s, %s' % tuple(result)
print "****The %sth section has the parameters:****" %i
print globals()["title"+str(i)]
### Extract Reads for this section.
Sections = root.xpath("/*/Section")
reads = root.xpath("/*/Section[@Name='" + sect.attrib['Name'] + "']/*/Well")
wellIDs = [read.attrib['Pos'] for read in reads]
data = [(s.text, float(s.attrib['WL']), r.attrib['Pos'])
for r in reads
for s in r]
dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well'])
### dataframe_rep replaces 'OVER' (when fluorescence signal maxes out) with '3289277', an arbitrarily high number
dataframe_rep = dataframe.replace({'OVER':'3289277'})
dataframe_rep[['fluorescence']] = dataframe_rep[['fluorescence']].astype('float')
### Create large_dataframe1, large_dataframe2, and large_dataframe3 that collect data for each section
### as we run through cycle through sections and files.
globals()["dataframe_pivot"+str(i)] = pd.pivot_table(dataframe_rep, index = 'wavelength (nm)', columns= ['Well'])
print 'The max fluorescence value in this dataframe is %s'% globals()["dataframe_pivot"+str(i)].values.max()
globals()["large_dataframe"+str(i)] = pd.concat([globals()["large_dataframe"+str(i)],globals()["dataframe_pivot"+str(i)]])
### Plot, making a separate png for each section.
for i, sect in enumerate(Sections):
section_name = sect.attrib['Name']
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
if parameters[0].attrib['Value'] == "Absorbance":
section_ylim = [0,0.2]
else:
section_ylim = [0,40000]
Alphabet = ['A','B','C','D','E','F','G','H']
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))
for j,A in enumerate(Alphabet):
for k in range(1,12):
try:
globals()["large_dataframe"+str(i)].fluorescence.get(A + str(k)).plot(ax=axes[(j/3)%3,j%3], title=A, c=cm.hsv(k*15), ylim=section_ylim, xlim=[240,800])
except:
print "****No row %s.****" %A
fig.suptitle('%s \n %s \n Barcode = %s' % (globals()["title"+str(i)], plate_type, barcode), fontsize=14)
fig.subplots_adjust(hspace=0.3)
plt.savefig('%s_%s.png' % (file_name, section_name))
return
def entry_point():
xml_files = sys.argv[1:]
process_files(xml_files)
if __name__ == '__main__':
xml_files = sys.argv[1:]
process_files(xml_files)
| lgpl-2.1 |
sambiak/recommandation-film | compte rendu/stat.py | 1 | 1033 | import math
import numpy as np
from movielens import *
import matplotlib.pyplot as plt
# returns a list giving the number of films seen by each user
def nbre_de_film_vu_par_utilisateur(array):
film_vu=[]
for i in range(670):
k=0
for j in array[i,:]:
if not math.isnan(j) :
k+=1
film_vu+=[k]
return film_vu
# returns a list containing the number of times each film has been rated
def nbre_de_note_par_film(array):
film_note=[]
for i in range(9125) :
k=0
for j in array[:,i]:
if not math.isnan(j):
k+=1
film_note+=[k]
return film_note
# gives the fraction of non-missing (non-NaN) entries in our ratings table (which is about 98.4% NaN)
def pourcentage_de_nan(array):
k=0
for i in range(9125):
for j in array[:,i]:
if not math.isnan(j) :
k+=1
return k/(9125*670)
# the most active user rated 2390 films
# the most-rated film received 339 ratings
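# Illustrative usage sketch, assuming `notes` is the users x films ratings
# array loaded from the movielens helpers (hypothetical name):
#
#     films_per_user = nbre_de_film_vu_par_utilisateur(notes)
#     plt.hist(films_per_user, bins=50)
#     plt.xlabel("films rated per user")
#     plt.ylabel("number of users")
#     plt.show()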
| gpl-3.0 |
cauchycui/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
jrmontag/Data-Science-45min-Intros | support-vector-machines-101/rbf-circles.py | 26 | 1504 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
from sklearn.svm import SVC
from sklearn.datasets import make_circles
# adapted from:
# http://scikit-learn.org/stable/auto_examples/svm/plot_svm_kernels.html
# http://scikit-learn.org/stable/auto_examples/decomposition/plot_kernel_pca.html
xx, yy = make_circles(n_samples=500, factor=0.1, noise=0.15)
clf = SVC(kernel='rbf')
clf.fit(xx, yy)
plt.figure(figsize=(8,6))
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
facecolors='none', zorder=10, s=300)
plt.scatter(xx[:, 0], xx[:, 1], c=yy, zorder=10, cmap=plt.cm.Paired, s=100)
#plt.scatter(xx[:, 0], xx[:, 1], c=yy, zorder=10, s=100)
plt.axis('tight')
x_min = -1.5
x_max = 1.5
y_min = -1.5
y_max = 1.5
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
#plt.figure(fignum, figsize=(4, 3))
#plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.pcolormesh(XX, YY, Z > 0, alpha=0.1)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.title('rbf kernel')
plt.show()
| unlicense |
norheim/pextant | pextant/lib/geopoint_test.py | 2 | 1383 | from geoshapely import *
import json
import pandas as pd
pd.options.display.max_rows = 5
latlong = LatLon()
origin = GeoPoint(latlong, 43.461621,-113.572019)
origincopy = GeoPolygon(latlong, [43.461621, 43.461622, 43.461622],[-113.572019,-113.572010, -113.572010])
with open('waypoints/MD10_EVA10_Stn18_Stn23_X.json') as data_file:
data = json.load(data_file)
ways_and_segments = data['sequence']
s = pd.DataFrame(ways_and_segments)
waypoints = s[s['type']=='Station']['geometry']
w = waypoints.values.tolist()
latlongFull = pd.DataFrame(w)
latlongInter = latlongFull['coordinates'].values.tolist()
waypointslatlong = pd.DataFrame(latlongInter, columns=['longitude','latitude'])
print waypointslatlong['latitude'].values, waypointslatlong['longitude'].values
waypoints = GeoPolygon(latlong, waypointslatlong['latitude'].values, waypointslatlong['longitude'].values)
from EnvironmentalModel import *
info = loadElevationsLite("maps/hwmidres.tif")
nw_corner = GeoPoint(UTM(info["zone"]), info["nw_easting"], info["nw_northing"])
print nw_corner
XY = Cartesian(nw_corner, info["resolution"])
se_corner = GeoPoint(XY, info["width"], info["height"])
print se_corner
corners = LineString([(p.x, p.y) for p in [nw_corner, se_corner]])
bounds = corners.envelope
print bounds
zoomarea = waypoints.envelope
print zoomarea
intersection = bounds.intersection(zoomarea)
print intersection | mit |
philippjfr/bokeh | bokeh/core/property/containers.py | 2 | 14930 | ''' Provide special versions of list and dict, that can automatically notify
about changes when used for property values.
Mutations to these values are detected, and the property owning the
collection is notified of the changes. Consider the following model
definition:
.. code-block:: python
class SomeModel(Model):
options = List(String)
If we have an instance of this model, ``m`` then we can set the entire
value of the ``options`` property at once:
.. code-block:: python
m.options = ["foo", "bar"]
When we do this in the context of a Bokeh server application that is being
viewed in a browser, this change is automatically noticed, and the
corresponding BokehJS property in the browser is synchronized, possibly
causing some change in the visual state of the application in the browser.
But it is also desirable that changes *inside* the ``options`` list also
be detected. That is, the following kinds of operations should also be
automatically synchronized between BokehJS and a Bokeh server:
.. code-block:: python
m.options.append("baz")
m.options[2] = "quux"
m.options.insert(0, "bar")
The classes in this module provide this functionality.
.. note::
These classes form part of the very low-level machinery that implements
the Bokeh model and property system. It is unlikely that any of these
classes or their methods will be applicable to any standard usage or to
anyone who is not directly developing on Bokeh's own infrastructure.
'''
from __future__ import absolute_import, print_function
from ...util.dependencies import import_optional
pd = import_optional('pandas')
def notify_owner(func):
''' A decorator for mutating methods of property container classes
that notifies owners of the property container about mutating changes.
Args:
func (callable) : the container method to wrap in a notification
Returns:
wrapped method
Examples:
A ``__setitem__`` could be wrapped like this:
.. code-block:: python
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super(PropertyValueDict, self).__setitem__(i, y)
The returned wrapped method will have a docstring indicating what
original method it is wrapping.
'''
def wrapper(self, *args, **kwargs):
old = self._saved_copy()
result = func(self, *args, **kwargs)
self._notify_owners(old)
return result
wrapper.__doc__ = "Container method ``%s`` instrumented to notify property owners" % func.__name__
return wrapper
class PropertyValueContainer(object):
''' A base class for property container classes that support change
notifications on mutating operations.
This class maintains an internal list of property owners, and also
provides a private mechanism for methods wrapped with
:func:`~bokeh.core.property_containers.notify_owners` to update
those owners when mutating changes occur.
'''
def __init__(self, *args, **kwargs):
self._owners = set()
super(PropertyValueContainer, self).__init__(*args, **kwargs)
def _register_owner(self, owner, descriptor):
self._owners.add((owner, descriptor))
def _unregister_owner(self, owner, descriptor):
self._owners.discard((owner, descriptor))
def _notify_owners(self, old, hint=None):
for (owner, descriptor) in self._owners:
descriptor._notify_mutated(owner, old, hint=hint)
def _saved_copy(self):
raise RuntimeError("Subtypes must implement this to make a backup copy")
class PropertyValueList(PropertyValueContainer, list):
''' A list property value container that supports change notifications on
mutating operations.
When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are
transparently created to wrap those values. These ``PropertyValueList``
values are subject to normal property validation. If the property type
``foo = List(Str)`` then attempting to set ``x.foo[0] = 10`` will raise
an error.
Instances of ``PropertyValueList`` can be explicitly created by passing
any object that the standard list initializer accepts, for example:
.. code-block:: python
>>> PropertyValueList([10, 20])
[10, 20]
>>> PropertyValueList((10, 20))
[10, 20]
The following mutating operations on lists automatically trigger
notifications:
.. code-block:: python
del x[y]
del x[i:j]
x += y
x *= y
x[i] = y
x[i:j] = y
x.append
x.extend
x.insert
x.pop
x.remove
x.reverse
x.sort
'''
def __init__(self, *args, **kwargs):
return super(PropertyValueList, self).__init__(*args, **kwargs)
def _saved_copy(self):
return list(self)
# delete x[y]
@notify_owner
def __delitem__(self, y):
return super(PropertyValueList, self).__delitem__(y)
# delete x[i:j]
@notify_owner
def __delslice__(self, i, j):
# Note: this is different py2 vs py3, py3 calls __delitem__ with a
# slice index, and does not have this method at all
return super(PropertyValueList, self).__delslice__(i, j)
# x += y
@notify_owner
def __iadd__(self, y):
return super(PropertyValueList, self).__iadd__(y)
# x *= y
@notify_owner
def __imul__(self, y):
return super(PropertyValueList, self).__imul__(y)
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super(PropertyValueList, self).__setitem__(i, y)
# x[i:j] = y
@notify_owner
def __setslice__(self, i, j, y):
# Note: this is different py2 vs py3, py3 calls __setitem__ with a
# slice index, and does not have this method at all
return super(PropertyValueList, self).__setslice__(i, j, y)
@notify_owner
def append(self, obj):
return super(PropertyValueList, self).append(obj)
@notify_owner
def extend(self, iterable):
return super(PropertyValueList, self).extend(iterable)
@notify_owner
def insert(self, index, obj):
return super(PropertyValueList, self).insert(index, obj)
@notify_owner
def pop(self, index=-1):
return super(PropertyValueList, self).pop(index)
@notify_owner
def remove(self, obj):
return super(PropertyValueList, self).remove(obj)
@notify_owner
def reverse(self):
return super(PropertyValueList, self).reverse()
@notify_owner
def sort(self, **kwargs):
return super(PropertyValueList, self).sort(**kwargs)
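# Illustrative sketch only: how a mutation on a ``PropertyValueList`` reaches a
# registered owner. ``_RecordingDescriptor`` is a stand-in for Bokeh's real
# property descriptors, which expose ``_notify_mutated(owner, old, hint=None)``;
# it simply records the notifications it receives.
def _example_list_notification():
    class _RecordingDescriptor(object):
        def __init__(self):
            self.calls = []
        def _notify_mutated(self, owner, old, hint=None):
            self.calls.append((owner, old, hint))
    values = PropertyValueList([10, 20])
    descriptor = _RecordingDescriptor()
    values._register_owner("some model", descriptor)
    values.append(30)  # wrapped by @notify_owner above
    # descriptor.calls is now [("some model", [10, 20], None)]
    return descriptor.calls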
class PropertyValueDict(PropertyValueContainer, dict):
''' A dict property value container that supports change notifications on
mutating operations.
When a Bokeh model has a ``Dict`` property, ``PropertyValueDict`` containers
are transparently created to wrap those values. These ``PropertyValueDict``
values are subject to normal property validation. If the property type
``foo = Dict(Str, Str)`` then attempting to set ``x.foo['bar'] = 10`` will
raise an error.
Instances of ``PropertyValueDict`` can be explicitly created by passing
any object that the standard dict initializer accepts, for example:
.. code-block:: python
>>> PropertyValueDict(dict(a=10, b=20))
{'a': 10, 'b': 20}
>>> PropertyValueDict(a=10, b=20)
{'a': 10, 'b': 20}
>>> PropertyValueDict([('a', 10), ['b', 20]])
{'a': 10, 'b': 20}
The following mutating operations on dicts automatically trigger
notifications:
.. code-block:: python
del x[y]
x[i] = y
x.clear
x.pop
x.popitem
x.setdefault
x.update
'''
def __init__(self, *args, **kwargs):
return super(PropertyValueDict, self).__init__(*args, **kwargs)
def _saved_copy(self):
return dict(self)
# delete x[y]
@notify_owner
def __delitem__(self, y):
return super(PropertyValueDict, self).__delitem__(y)
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super(PropertyValueDict, self).__setitem__(i, y)
@notify_owner
def clear(self):
return super(PropertyValueDict, self).clear()
@notify_owner
def pop(self, *args):
return super(PropertyValueDict, self).pop(*args)
@notify_owner
def popitem(self):
return super(PropertyValueDict, self).popitem()
@notify_owner
def setdefault(self, *args):
return super(PropertyValueDict, self).setdefault(*args)
@notify_owner
def update(self, *args, **kwargs):
return super(PropertyValueDict, self).update(*args, **kwargs)
class PropertyValueColumnData(PropertyValueDict):
''' A property value container for ColumnData that supports change
notifications on mutating operations.
This property value container affords specialized code paths for
updating the .data dictionary for ColumnDataSource. When possible,
more efficient ColumnDataChangedEvent hints are generated to perform
the updates:
.. code-block:: python
x[i] = y
x.update
'''
# x[i] = y
# don't wrap with notify_owner --- notifies owners explicitly
def __setitem__(self, i, y):
return self.update([(i, y)])
# don't wrap with notify_owner --- notifies owners explicitly
def update(self, *args, **kwargs):
old = self._saved_copy()
result = super(PropertyValueDict, self).update(*args, **kwargs)
from ...document.events import ColumnDataChangedEvent
# Grab keys to update according to Python docstring for update([E, ]**F)
#
# If E is present and has a .keys() method, then does: for k in E: D[k] = E[k]
# If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v
# In either case, this is followed by: for k in F: D[k] = F[k]
cols = set(kwargs.keys())
if len(args) == 1:
E = args[0]
if hasattr(E, 'keys'):
cols |= set(E.keys())
else:
cols |= { x[0] for x in E }
# we must loop ourselves here instead of calling _notify_owners
# because the hint is customized for each owner separately
for (owner, descriptor) in self._owners:
hint = ColumnDataChangedEvent(owner.document, owner, cols=list(cols))
descriptor._notify_mutated(owner, old, hint=hint)
return result
# don't wrap with notify_owner --- notifies owners explicitly
def _stream(self, doc, source, new_data, rollover=None, setter=None):
''' Internal implementation to handle special-casing stream events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.stream`` method on column data sources exists to provide a
more efficient way to perform streaming (i.e. append-only) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsStreamedEvent`` hint to the message containing
only the small streamed data that BokehJS needs in order to
efficiently synchronize.
.. warning::
This function assumes the integrity of ``new_data`` has already
been verified.
'''
old = self._saved_copy()
import numpy as np
# pandas/issues/13918
if pd and isinstance(new_data, pd.DataFrame):
new_items = new_data.iteritems()
else:
new_items = new_data.items()
# TODO (bev) Currently this reports old differently for array vs list
        # For arrays it reports the actual old value. For lists, the old value
# is actually the already updated value. This is because the method
# self._saved_copy() makes a shallow copy.
for k, v in new_items:
if isinstance(self[k], np.ndarray):
data = np.append(self[k], new_data[k])
if rollover and len(data) > rollover:
data = data[-rollover:]
super(PropertyValueDict, self).__setitem__(k, data)
else:
L = self[k]
L.extend(new_data[k])
if rollover is not None:
del L[:-rollover]
from ...document.events import ColumnsStreamedEvent
self._notify_owners(old,
hint=ColumnsStreamedEvent(doc, source, new_data, rollover, setter))
# don't wrap with notify_owner --- notifies owners explicitly
def _patch(self, doc, source, patches, setter=None):
''' Internal implementation to handle special-casing patch events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.patch`` method on column data sources exists to provide a
more efficient way to perform patching (i.e. random access) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsPatchedEvent`` hint to the message containing
only the small patched data that BokehJS needs in order to efficiently
synchronize.
.. warning::
This function assumes the integrity of ``patches`` has already
been verified.
'''
import numpy as np
old = self._saved_copy()
for name, patch in patches.items():
for ind, value in patch:
if isinstance(ind, (int, slice)):
self[name][ind] = value
else:
shape = self[name][ind[0]][ind[1:]].shape
self[name][ind[0]][ind[1:]] = np.array(value, copy=False).reshape(shape)
from ...document.events import ColumnsPatchedEvent
self._notify_owners(old,
hint=ColumnsPatchedEvent(doc, source, patches, setter))
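# Illustrative sketch only: the public ``ColumnDataSource`` methods that funnel
# into the ``_stream``/``_patch`` helpers above. Assumes a standalone
# (non-server) source; the column names and values are arbitrary placeholders.
def _example_stream_and_patch():
    from bokeh.models import ColumnDataSource
    source = ColumnDataSource(data=dict(x=[0, 1], y=[10.0, 20.0]))
    # append-only update: only the newly streamed rows are sent to BokehJS
    source.stream(dict(x=[2], y=[30.0]), rollover=100)
    # random-access update: patch individual entries as (index, new_value) pairs
    source.patch({'y': [(0, 11.0)]})
    return source.data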
| bsd-3-clause |
xingniu/nlp-util | probability-histogram.py | 1 | 2748 | #!/usr/bin/env python -*- coding: utf-8 -*-
import numpy as np
import argparse
import utils
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', required=False, help='input file(s) (glob patterns are supported)')
parser.add_argument('-c', '--column', required=False, type=int, default=0, help='the index of column that contains values')
parser.add_argument('-n', '--normalize', required=False, action="store_true", help='normalize scores to [-1,1]')
parser.add_argument('-l', '--lower', required=False, type=float, help='the lower range of bins')
parser.add_argument('-u', '--upper', required=False, type=float, help='the upper range of bins')
parser.add_argument('-b', '--bins', required=False, type=int, default=10, help='the number of bins')
parser.add_argument('-p', '--plot', required=False, action="store_true", help='plot the histogram')
args = parser.parse_args()
scores = []
for line in utils.get_input(args.input):
score = utils.str2float(line.split()[args.column])
if score != None:
scores.append(score)
lower = np.min(scores)
upper = np.max(scores)
magnitude = np.max(np.abs(scores))
norm_lower = -1
norm_upper = 1
if args.lower is not None and args.upper is not None:
if args.normalize:
lower = args.lower * magnitude
upper = args.upper * magnitude
norm_lower = args.lower
norm_upper = args.upper
else:
lower = args.lower
upper = args.upper
if args.normalize:
bin_edges = np.linspace(norm_lower, norm_upper, args.bins+1, endpoint=True)
else:
bin_edges = np.linspace(lower, upper, args.bins+1, endpoint=True)
interval = (upper-lower)*1.0/args.bins
if interval >= 2:
decimal = str(0)
elif interval >= 0.1:
decimal = str(1)
else:
decimal = str(2)
df = "%."+decimal+"f"
bin_labels = [("["+df+","+df+")") % (bin_edges[i], bin_edges[i+1]) for i in range(len(bin_edges)-1)]
bin_labels[-1] = bin_labels[-1][:-1]+"]"
hist = np.histogram(scores, bins=args.bins, range=(lower,upper))
for i in range(len(hist[0])):
print((df+"\t%s\t%.6f") % (bin_edges[i], bin_labels[i], float(hist[0][i])/len(scores)))
print(df % bin_edges[-1])
if args.plot:
import matplotlib.pyplot as plt
width = (bin_edges[1]-bin_edges[0])/2.0
plt.xticks(bin_edges[:-1], bin_labels)
plt.bar(bin_edges[:-1], hist[0]/float(len(scores)), width, align='center', alpha=0.5)
plt.xlim([2*bin_edges[0]-bin_edges[1], bin_edges[-1]])
plt.show()
| mit |
jougs/nest-simulator | pynest/examples/spatial/connex_ew.py | 6 | 2248 | # -*- coding: utf-8 -*-
#
# connex_ew.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NEST spatial example
Create two populations of iaf_psc_alpha neurons on a 30x30 grid with edge_wrap,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import matplotlib.pyplot as plt
import numpy as np
import nest
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.], edge_wrap=True)
#######################################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 0.5}}}
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
plt.clf()
#####################################################################
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('connex_ew.pdf')
| gpl-2.0 |
RomainBrault/scikit-learn | examples/applications/plot_face_recognition.py | 44 | 5706 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 7 most represented people in the dataset:
================== ============ ======= ========== =======
precision recall f1-score support
================== ============ ======= ========== =======
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
================== ============ ======= ========== =======
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
Code4SA/umibukela | umibukela/importer/mavc/cycle1_2_sassa_paypoint.py | 1 | 6948 | import umibukela.importer.mavc.cycle1_2 as cycle1_2
import uuid
"""
_________ _____ _________ _________ _____
/ _____/ / _ \ / _____// _____/ / _ \
\_____ \ / /_\ \ \_____ \ \_____ \ / /_\ \
/ \/ | \/ \/ \/ | \
/_______ /\____|__ /_______ /_______ /\____|__ / PAYPOINT
\/ \/ \/ \/ \/
- no optional questions
- one "select all that apply" - visit_reason
- map valid answers to new columns val True/False
- remaining columns
- map known responses to cycle2 values
- map known responses to 'other' if in cycle2, otherwise n/a
"""
# In alphabetical order of the original column names according to pandas
#
# 'waiting_group/medicine_time',
columns = [
'yes_no_group/money_lenders',
'yes_no_group/bribe',
'performance_group/queues',
'performance_group/respect',
'performance_group/assistance',
'deductions_agree',
'yes_no_group/safety',
'yes_no_group/documents',
'yes_no_group/complaint',
'yes_no_group/response',
'performance_group/access',
'ease_access',
'transport_amount',
'waiting_time',
'personal_comment',
'clinic_feedback',
'improvements_comment',
'demographics_group/gender',
'demographics_group/age',
'town_village',
'local_office',
'district',
'monitor',
'today',
'visit_reason/care_dependency',
'visit_reason/child_support',
'visit_reason/disability',
'visit_reason/foster_child',
'visit_reason/social_relief',
'visit_reason/old_age',
'visit_reason/war_veterans',
'yes_no_group/deductions',
'performance_group/clean',
]
# change values
# method
# for val in pd.unique(df.where(df['device_id']=='MAVCEC1')['facility'].ravel()):
# print val
#
# deviceid doesn't seem to be fixed to a site
#
# df.where(df['town_village']=='Folweni').replace(inplace=True, to_replace={'facility':{'Clinic':'notclinic'}})
# doesn't seem to work (df.where returns a new DataFrame, so the inplace
# replace only mutates that temporary copy, not df)
# for c in df.columns:
# if c.startswith('waiting_group/'):
# print("### %s ###" % c)
# for val in pd.unique(df[c].ravel()):
# print("'%s': ''," % val)
# 'visit_reason': {
# '3 days for infant': 'accompanying',
replacements_all = {
'visit_reason/care_dependency': {
'': 'False',
'Care Dependency Grant': 'True',
},
'visit_reason/child_support': {
'': 'False',
'Child-Support Grant': 'True',
},
'visit_reason/disability': {
'': 'False',
'Disability Grant': 'True',
},
'visit_reason/foster_child': {
'': 'False',
'Foster Child Grant': 'True',
},
'visit_reason/social_relief': {
'': 'False',
'Social relief of distress': 'True',
},
'visit_reason/old_age': {
'': 'False',
'State Old Age Grant': 'True',
},
'visit_reason/war_veterans': {
'': 'False',
'War-Veteran\'s Grant': 'True',
},
'clinic_feedback': {
'Yes': 'yes',
'No': 'no',
'Maybe': 'maybe',
},
'demographics_group/age': {
'26 - 40 years old': '26_40',
'41 - 60 years old': '40_60',
'Older than 60 years': 'older_60',
'Under 25 years old': 'under_25',
},
'demographics_group/gender': {
'Male': 'male',
'Female': 'female',
},
'performance_group/clean': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/queues': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/respect': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/access': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'performance_group/assistance': {
'Very poor': '1',
'Poor': '2',
'OK': '3',
'Good': '4',
'Excellent': '5',
'Not applicable': 'n/a',
'': 'n/a',
},
'transport_amount': {
'R11 - R25': 'eleven_twentyfive',
'Nothing': 'nothing',
'R26 - R50': 'twentysix_fifty',
'Less than R10': 'under_ten',
'R51 - R75': 'fiftyone_seventyfive',
'More than R75': 'more_seventyfive',
'': 'n/a',
},
'waiting_time': {
'30 minutes - 1 hour': 'thirty_one',
'1 - 2 hours': 'one_two',
'2 - 4 hours': 'two_four',
'4 - 6 hours': 'four_six',
'Less than 30 minutes': 'under_thirty',
},
'yes_no_group/bribe': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
},
'yes_no_group/documents': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
},
'yes_no_group/money_lenders': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
},
'yes_no_group/complaint': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
},
'yes_no_group/safety': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
'YesNo': 'n/a',
},
'yes_no_group/response': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
'YesNo': 'n/a',
},
'yes_no_group/deductions': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
'NoNot applicable': 'n/a',
},
'deductions_agree': {
'Yes': 'yes',
'No': 'no',
'Not applicable': 'n/a',
},
'ease_access': {
'Easy': 'easy',
'Difficulto': 'difficult',
'Very difficult': 'very_difficult',
},
}
# 'MAVCEC1': {
# 'facility': {
# 'Thabong Clinic': 'thabong',
device_replacements = {
'MAVCCT5': {
'facility': 'montagu',
},
'MAVCCT1': {
'facility': 'kaigarib',
},
}
# 'MAVCEC1': 'Health Citizen Survey MAVCEC1 - Data.csv',
device_files = {
'MAVCCT5': 'SASSA Citizen Survey Pay Point MAVCCT5 - Data.csv',
'MAVCCT1': 'NEW SASSA Citizen Survey Pay Point MAVCCT1 - Data.csv',
}
# [c for c in df2.columns if c.startswith("visit_reason")]
#
# 'visit_reason': ['accompanying',
select_all_that_applies_columns = {
}
def run():
dicts = cycle1_2.run(columns, replacements_all, device_files, device_replacements, select_all_that_applies_columns)
for d in dicts:
d['_uuid'] = str(uuid.uuid4())
return dicts
| mit |
YinongLong/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 143 | 9461 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
jontyjashan/PiNN_Caffe2 | dc_iv_api.py | 1 | 16120 | import caffe2_paths
import os
import pickle
from caffe2.python import (
workspace, layer_model_helper, schema, optimizer, net_drawer
)
import caffe2.python.layer_model_instantiator as instantiator
import numpy as np
from pinn.pinn_lib import build_pinn, init_model_with_schemas
import pinn.data_reader as data_reader
import pinn.preproc as preproc
import pinn.parser as parser
import pinn.visualizer as visualizer
import pinn.exporter as exporter
# import logging
import matplotlib.pyplot as plt
class DCModel:
def __init__(
self,
model_name,
sig_input_dim=1,
tanh_input_dim=1,
output_dim=1,
):
self.model_name = model_name
self.model = init_model_with_schemas(
model_name, sig_input_dim, tanh_input_dim, output_dim)
self.input_data_store = {}
self.preproc_param = {}
self.net_store = {}
self.reports = {
'epoch':[],
'train_loss':[], 'eval_loss':[],
'train_l1_metric':[], 'eval_l1_metric':[],
'train_scaled_l1_metric':[], 'eval_scaled_l1_metric':[]
}
def add_data(
self,
data_tag,
data_arrays,
preproc_param,
override=True,
):
'''
data_arrays are in the order of sig_input, tanh_input, and label
'''
assert len(data_arrays) == 3, 'Incorrect number of input data'
# number of examples and same length assertion
num_example = len(data_arrays[0])
for data in data_arrays[1:]:
assert len(data) == num_example, 'Mismatch dimensions'
# set default values in preproc_param if not set
preproc_param.setdefault('preproc_slope_vg', -1.0)
preproc_param.setdefault('preproc_threshold_vg', 0.0)
preproc_param.setdefault('preproc_slope_vd', -1.0)
preproc_param.setdefault('preproc_threshold_vd', 0.0)
self.preproc_param = preproc_param
self.pickle_file_name = self.model_name + '_preproc_param' + '.p'
db_name = self.model_name + '_' + data_tag + '.minidb'
if os.path.isfile(db_name):
if override:
print("XXX Delete the old database...")
os.remove(db_name)
os.remove(self.pickle_file_name)
else:
				raise Exception('Encountered a database with the same name. ' +
					'Choose another model name or set override to True.')
print("+++ Create a new database...")
pickle.dump(
self.preproc_param,
open(self.pickle_file_name, 'wb')
)
preproc_data_arrays = preproc.dc_iv_preproc(
data_arrays[0], data_arrays[1], data_arrays[2],
self.preproc_param['scale'],
self.preproc_param['vg_shift'],
slope_vg=self.preproc_param['preproc_slope_vg'],
thre_vg=self.preproc_param['preproc_threshold_vg'],
slope_vd=self.preproc_param['preproc_slope_vd'],
thre_vd=self.preproc_param['preproc_threshold_vd'],
)
self.preproc_data_arrays=preproc_data_arrays
# Only expand the dim if the number of dimension is 1
preproc_data_arrays = [np.expand_dims(
x, axis=1) if x.ndim == 1 else x for x in preproc_data_arrays]
# Write to database
data_reader.write_db('minidb', db_name, preproc_data_arrays)
self.input_data_store[data_tag] = [db_name, num_example]
def build_nets(
self,
hidden_sig_dims,
hidden_tanh_dims,
train_batch_size=1,
eval_batch_size=1,
weight_optim_method='AdaGrad',
weight_optim_param={'alpha':0.01, 'epsilon':1e-4},
bias_optim_method='AdaGrad',
bias_optim_param={'alpha':0.01, 'epsilon':1e-4},
loss_function='scaled_l1',
max_loss_scale = 1e6,
):
assert len(self.input_data_store) > 0, 'Input data store is empty.'
assert 'train' in self.input_data_store, 'Missing training data.'
self.batch_size = train_batch_size
# Build the date reader net for train net
input_data_train = data_reader.build_input_reader(
self.model,
self.input_data_store['train'][0],
'minidb',
['sig_input', 'tanh_input', 'label'],
batch_size=train_batch_size,
data_type='train',
)
if 'eval' in self.input_data_store:
# Build the data reader net for eval net
input_data_eval = data_reader.build_input_reader(
self.model,
self.input_data_store['eval'][0],
'minidb',
['eval_sig_input', 'eval_tanh_input', 'eval_label'],
batch_size=eval_batch_size,
data_type='eval',
)
# Build the computational nets
# Create train net
self.model.input_feature_schema.sig_input.set_value(
input_data_train[0].get(), unsafe=True)
self.model.input_feature_schema.tanh_input.set_value(
input_data_train[1].get(), unsafe=True)
self.model.trainer_extra_schema.label.set_value(
input_data_train[2].get(), unsafe=True)
self.pred, self.loss = build_pinn(
self.model,
sig_net_dim=hidden_sig_dims,
tanh_net_dim=hidden_tanh_dims,
weight_optim=_build_optimizer(
weight_optim_method, weight_optim_param),
bias_optim=_build_optimizer(
bias_optim_method, bias_optim_param),
loss_function=loss_function,
max_loss_scale=max_loss_scale
)
train_init_net, train_net = instantiator.generate_training_nets(self.model)
workspace.RunNetOnce(train_init_net)
workspace.CreateNet(train_net)
self.net_store['train_net'] = train_net
pred_net = instantiator.generate_predict_net(self.model)
workspace.CreateNet(pred_net)
self.net_store['pred_net'] = pred_net
if 'eval' in self.input_data_store:
# Create eval net
self.model.input_feature_schema.sig_input.set_value(
input_data_eval[0].get(), unsafe=True)
self.model.input_feature_schema.tanh_input.set_value(
input_data_eval[1].get(), unsafe=True)
self.model.trainer_extra_schema.label.set_value(
input_data_eval[2].get(), unsafe=True)
eval_net = instantiator.generate_eval_net(self.model)
workspace.CreateNet(eval_net)
self.net_store['eval_net'] = eval_net
def train_with_eval(
self,
num_epoch=1,
report_interval=0,
eval_during_training=False,
):
''' Fastest mode: report_interval = 0
Medium mode: report_interval > 0, eval_during_training=False
Slowest mode: report_interval > 0, eval_during_training=True
'''
num_batch_per_epoch = int(
self.input_data_store['train'][1] /
self.batch_size
)
if not self.input_data_store['train'][1] % self.batch_size == 0:
num_batch_per_epoch += 1
			print('[Warning]: the number of examples is not divisible by batch_size. ' +
				'Running on {} examples instead of {}'.format(
num_batch_per_epoch * self.batch_size,
self.input_data_store['train'][1]
)
)
		print('<<< Running {} iterations'.format(num_epoch * num_batch_per_epoch))
train_net = self.net_store['train_net']
if report_interval > 0:
print('>>> Training with Reports')
num_eval = int(num_epoch / report_interval)
num_unit_iter = int((num_batch_per_epoch * num_epoch)/num_eval)
if eval_during_training and 'eval_net' in self.net_store:
print('>>> Training with Eval Reports (Slowest mode)')
eval_net = self.net_store['eval_net']
for i in range(num_eval):
workspace.RunNet(
train_net.Proto().name,
num_iter=num_unit_iter
)
self.reports['epoch'].append((i + 1) * report_interval)
train_loss = np.asscalar(schema.FetchRecord(self.loss).get())
self.reports['train_loss'].append(train_loss)
# Add metrics
train_l1_metric = np.asscalar(schema.FetchRecord(
self.model.metrics_schema.l1_metric).get())
self.reports['train_l1_metric'].append(train_l1_metric)
train_scaled_l1_metric = np.asscalar(schema.FetchRecord(
self.model.metrics_schema.scaled_l1_metric).get())
self.reports['train_scaled_l1_metric'].append(
train_scaled_l1_metric)
if eval_during_training and 'eval_net' in self.net_store:
workspace.RunNet(
eval_net.Proto().name,
num_iter=num_unit_iter)
eval_loss = np.asscalar(schema.FetchRecord(self.loss).get())
# Add metrics
self.reports['eval_loss'].append(eval_loss)
eval_l1_metric = np.asscalar(schema.FetchRecord(
self.model.metrics_schema.l1_metric).get())
self.reports['eval_l1_metric'].append(eval_l1_metric)
eval_scaled_l1_metric = np.asscalar(schema.FetchRecord(
self.model.metrics_schema.scaled_l1_metric).get())
self.reports['eval_scaled_l1_metric'].append(
eval_scaled_l1_metric)
else:
print('>>> Training without Reports (Fastest mode)')
workspace.RunNet(
train_net,
num_iter=num_epoch * num_batch_per_epoch
)
print('>>> Saving test model')
exporter.save_net(
self.net_store['pred_net'],
self.model,
self.model_name+'_init',self.model_name+'_predict'
)
	# Deprecated
def avg_loss_full_epoch(self, net_name):
num_batch_per_epoch = int(
self.input_data_store['train'][1] /
self.batch_size
)
if not self.input_data_store['train'][1] % self.batch_size == 0:
num_batch_per_epoch += 1
			print('[Warning]: the number of examples is not divisible by batch_size. ' +
				'Running on {} examples instead of {}'.format(
num_batch_per_epoch * self.batch_size,
self.input_data_store['train'][1]
)
)
# Get the average loss of all data
loss = 0.
for j in range(num_batch_per_epoch):
workspace.RunNet(self.net_store[net_name])
loss += np.asscalar(schema.FetchRecord(self.loss).get())
loss /= num_batch_per_epoch
return loss
def draw_nets(self):
for net_name in self.net_store:
net = self.net_store[net_name]
graph = net_drawer.GetPydotGraph(net.Proto().op, rankdir='TB')
with open(net.Name() + ".png",'wb') as f:
f.write(graph.create_png())
def predict_ids(self, vg, vd):
# preproc the input
vg = vg.astype(np.float32)
vd = vd.astype(np.float32)
if len(self.preproc_param) == 0:
self.preproc_param = pickle.load(
open(self.pickle_file_name, "rb" )
)
dummy_ids = np.zeros(len(vg))
preproc_data_arrays = preproc.dc_iv_preproc(
vg, vd, dummy_ids,
self.preproc_param['scale'],
self.preproc_param['vg_shift'],
slope_vg=self.preproc_param['preproc_slope_vg'],
thre_vg=self.preproc_param['preproc_threshold_vg'],
slope_vd=self.preproc_param['preproc_slope_vd'],
thre_vd=self.preproc_param['preproc_threshold_vd'],
)
_preproc_data_arrays = [np.expand_dims(
x, axis=1) for x in preproc_data_arrays]
workspace.FeedBlob('DBInput_train/sig_input', _preproc_data_arrays[0])
workspace.FeedBlob('DBInput_train/tanh_input', _preproc_data_arrays[1])
pred_net = self.net_store['pred_net']
workspace.RunNet(pred_net)
_ids = np.squeeze(schema.FetchRecord(self.pred).get())
restore_id_func = preproc.get_restore_id_func(
self.preproc_param['scale'],
self.preproc_param['vg_shift'],
slope_vg=self.preproc_param['preproc_slope_vg'],
thre_vg=self.preproc_param['preproc_threshold_vg'],
slope_vd=self.preproc_param['preproc_slope_vd'],
thre_vd=self.preproc_param['preproc_threshold_vd'],
)
ids = restore_id_func(_ids, preproc_data_arrays[0], preproc_data_arrays[1])
return _ids, ids
def plot_loss_trend(self):
plt.plot(
self.reports['epoch'],
self.reports['train_loss'], 'r',
label='train error'
)
plt.plot(
self.reports['epoch'],
self.reports['train_scaled_l1_metric'], 'b',
label='train_scaled_l1_metric'
)
plt.plot(
self.reports['epoch'],
self.reports['train_l1_metric'], 'g',
label='train_l1_metric'
)
if len(self.reports['eval_loss']) > 0:
plt.plot(
self.reports['epoch'],
self.reports['eval_loss'], 'r--',
label='eval error'
)
plt.plot(
self.reports['epoch'],
self.reports['eval_scaled_l1_metric'], 'b--',
label='eval_scaled_l1_metric'
)
plt.plot(
self.reports['epoch'],
self.reports['eval_l1_metric'], 'g--',
label='eval_l1_metric'
)
plt.legend()
#plt.show()
def save_loss_trend(self,save_name):
if len(self.reports['eval_loss'])>0:
f = open(save_name, "w")
f.write("{},{},{},{},{},{},{}\n".format("epoch", "train_loss","eval_loss","train_l1_metric","eval_l1_metric",
"train_scaled_l1_metric","eval_scaled_l1_metric" ))
for x in zip(self.reports['epoch'],self.reports['train_loss'],self.reports['eval_loss'],self.reports['train_l1_metric'],
self.reports['eval_l1_metric'],self.reports['train_scaled_l1_metric'],self.reports['eval_scaled_l1_metric'] ):
f.write("{},{},{},{},{},{},{}\n".format(x[0], x[1], x[2], x[3], x[4], x[5], x[6]))
f.close()
else:
f = open(save_name, "w")
f.write("{},{},{},{}\n".format("epoch", "train_loss","train_l1_metric",
"train_scaled_l1_metric" ))
for x in zip(self.reports['epoch'],self.reports['train_loss'],self.reports['train_l1_metric'],
self.reports['train_scaled_l1_metric'] ):
f.write("{},{},{},{}\n".format(x[0], x[1], x[2], x[3]))
f.close()
def save_loss(self):
epoch = self.reports['epoch'][len(self.reports['epoch'])-1]
train_loss = self.reports['train_loss'][len(self.reports['epoch'])-1]
eval_loss = self.reports['eval_loss'][len(self.reports['epoch'])-1]
train_l1_metric = self.reports['train_l1_metric'][len(self.reports['epoch'])-1]
eval_l1_metric = self.reports['eval_l1_metric'][len(self.reports['epoch'])-1]
train_scaled_l1_metric = self.reports['train_scaled_l1_metric'][len(self.reports['epoch'])-1]
eval_scaled_l1_metric = self.reports['eval_scaled_l1_metric'][len(self.reports['epoch'])-1]
return epoch,train_loss,eval_loss,train_l1_metric,eval_l1_metric,train_scaled_l1_metric,eval_scaled_l1_metric
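# Illustrative sketch only: a minimal end-to-end use of the DCModel API defined
# above. The hidden-layer sizes and the exact structure of ``scale``/``vg_shift``
# are assumptions (they must match whatever pinn.preproc.dc_iv_preproc expects);
# this is not taken from the repository's own training scripts.
def _example_dc_model_usage(vg, vd, ids, scale, vg_shift):
	model = DCModel('example_device')
	model.add_data(
		'train', [vg, vd, ids],
		preproc_param={'scale': scale, 'vg_shift': vg_shift},
	)
	model.build_nets(
		hidden_sig_dims=[16, 1],  # assumed sizes; the trailing 1 is the output dim
		hidden_tanh_dims=[16, 1],
		train_batch_size=128,
	)
	# report_interval=0 is the fastest mode described in train_with_eval above
	model.train_with_eval(num_epoch=10, report_interval=0)
	scaled_ids, restored_ids = model.predict_ids(vg, vd)
	return restored_ids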
# --------------------------------------------------------
# ---------------- Global functions -------------------
# --------------------------------------------------------
def predict_ids(model_name, vg, vd):
workspace.ResetWorkspace()
# preproc the input
vg = vg.astype(np.float32)
vd = vd.astype(np.float32)
#if len(self.preproc_param) == 0:
preproc_param = pickle.load(
open(model_name+'_preproc_param.p', "rb" )
)
dummy_ids = np.zeros(len(vg))
preproc_data_arrays = preproc.dc_iv_preproc(
vg, vd, dummy_ids,
preproc_param['scale'],
preproc_param['vg_shift'],
slope_vg=preproc_param['preproc_slope_vg'],
thre_vg=preproc_param['preproc_threshold_vg'],
slope_vd=preproc_param['preproc_slope_vd'],
thre_vd=preproc_param['preproc_threshold_vd'],
)
_preproc_data_arrays = [np.expand_dims(
x, axis=1) for x in preproc_data_arrays]
workspace.FeedBlob('DBInput_train/sig_input', _preproc_data_arrays[0])
workspace.FeedBlob('DBInput_train/tanh_input', _preproc_data_arrays[1])
pred_net = exporter.load_net(model_name+'_init', model_name+'_predict')
#print(type(pred_net.name))
workspace.RunNet(pred_net)
_ids = np.squeeze(workspace.FetchBlob('prediction'))
restore_id_func = preproc.get_restore_id_func(
preproc_param['scale'],
preproc_param['vg_shift'],
slope_vg=preproc_param['preproc_slope_vg'],
thre_vg=preproc_param['preproc_threshold_vg'],
slope_vd=preproc_param['preproc_slope_vd'],
thre_vd=preproc_param['preproc_threshold_vd'],
)
ids = restore_id_func(_ids, preproc_data_arrays[0], preproc_data_arrays[1])
return _ids, ids
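# Usage sketch (editorial addition, not part of the original module): given a
# model exported as '<name>_init', '<name>_predict' plus '<name>_preproc_param.p',
# drain currents can be predicted on an arbitrary bias grid. The model name
# 'hemt_dc_model' below is a hypothetical placeholder.
#
#   import numpy as np
#   vg_grid = np.linspace(-2.0, 1.0, 31)
#   vd_grid = np.linspace(0.0, 5.0, 31)
#   vg, vd = [a.flatten() for a in np.meshgrid(vg_grid, vd_grid)]
#   _ids_scaled, ids = predict_ids('hemt_dc_model', vg, vd)
#   plot_iv(vg, vd, ids, styles=['vg_major_linear'])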
def plot_iv(
vg, vd, ids,
vg_comp = None, vd_comp = None, ids_comp = None,
save_name = '',
styles = ['vg_major_linear', 'vd_major_linear', 'vg_major_log', 'vd_major_log']
):
if 'vg_major_linear' in styles:
visualizer.plot_linear_Id_vs_Vd_at_Vg(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
save_name = save_name + 'vg_major_linear'
)
if 'vd_major_linear' in styles:
visualizer.plot_linear_Id_vs_Vg_at_Vd(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
save_name = save_name + 'vd_major_linear'
)
if 'vg_major_log' in styles:
visualizer.plot_log_Id_vs_Vd_at_Vg(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
save_name = save_name + 'vg_major_log'
)
if 'vd_major_log' in styles:
visualizer.plot_log_Id_vs_Vg_at_Vd(
vg, vd, ids,
vg_comp = vg_comp, vd_comp = vd_comp, ids_comp = ids_comp,
save_name = save_name + 'vd_major_log'
)
def _build_optimizer(optim_method, optim_param):
if optim_method == 'AdaGrad':
optim = optimizer.AdagradOptimizer(**optim_param)
elif optim_method == 'SgdOptimizer':
optim = optimizer.SgdOptimizer(**optim_param)
elif optim_method == 'Adam':
optim = optimizer.AdamOptimizer(**optim_param)
else:
raise Exception(
            'Did you forget to implement {}?'.format(optim_method))
return optim
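# Usage sketch (editorial addition): optim_param is forwarded unchanged to the
# selected Caffe2 optimizer constructor, so its keys must match that
# constructor's keyword arguments. The hyperparameter values below are
# illustrative only, not defaults taken from this module.
#
#   adam = _build_optimizer('Adam', {'alpha': 1e-3})
#   sgd = _build_optimizer('SgdOptimizer', {'base_learning_rate': 0.01})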
| mit |
capturePointer/vigra | vigranumpy/examples/grid_graph_shortestpath.py | 8 | 3978 | import vigra
import vigra.graphs as vigraph
import pylab
import numpy
np=numpy
import sys
import matplotlib
import pylab as plt
import math
from matplotlib.widgets import Slider, Button, RadioButtons
def makeWeights(gamma):
global hessian,gradmag,gridGraph
print "hessian",hessian.min(),hessian.max()
print "raw ",raw.min(),raw.max()
wImg= numpy.exp((gradmag**0.5)*gamma*-1.0)#**0.5
wImg = numpy.array(wImg).astype(numpy.float32)
w=vigra.graphs.implicitMeanEdgeMap(gridGraph,wImg)
return w
def makeVisuImage(path,img):
coords = (path[:,0],path[:,1])
visuimg =img.copy()
iR=visuimg[:,:,0]
iG=visuimg[:,:,1]
iB=visuimg[:,:,2]
iR[coords]=255
iG[coords]=0
iB[coords]=0
visuimg-=visuimg.min()
visuimg/=visuimg.max()
return visuimg
f = '100075.jpg'
f = '69015.jpg'
#f = "/media/tbeier/GSP1RMCPRFR/iso.03530.png"
img = vigra.impex.readImage(f)
print img.shape
if(img.shape[2]==1):
img = numpy.concatenate([img]*3,axis=2)
imgLab = img
imgLab = vigra.taggedView(imgLab,'xyc')
else:
imgLab = vigra.colors.transform_RGB2Lab(img)
sigma = 1.0
imgLab-=imgLab.min()
imgLab/=imgLab.max()
imgLab*=255
img-=img.min()
img/=img.max()
img*=255
print imgLab.shape
print "interpolate image"
imgLabSmall = imgLab
# make a few edge weights
gradmag = numpy.squeeze(vigra.filters.gaussianGradientMagnitude(imgLabSmall,sigma))
hessian = numpy.squeeze(vigra.filters.hessianOfGaussianEigenvalues(imgLabSmall[:,:,0],sigma))[:,:,0]
hessian-=hessian.min()
raw = 256-imgLabSmall[:,:,0].copy()
gridGraph = vigraph.gridGraph(imgLab.shape[:2],False)
weights = makeWeights(3.0)
pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph)
visuimg =img.copy()
ax = plt.gca()
fig = plt.gcf()
visuimg-=visuimg.min()
visuimg/=visuimg.max()
implot = ax.imshow(numpy.swapaxes(visuimg,0,1),cmap='gray')
clickList=[]
frozen = False
axslider = plt.axes([0.0, 0.00, 0.4, 0.075])
axfreeze = plt.axes([0.6, 0.00, 0.1, 0.075])
axunfreeze = plt.axes([0.8, 0.00, 0.1, 0.075])
bfreeze = Button(axfreeze, 'freeze')
bunfreeze = Button(axunfreeze, 'unfreeze and clear')
sgamma = Slider(axslider, 'gamma', 0.01, 5.0, valinit=1.0)
def onclick(event):
global clickList
global weights
global img
if event.xdata != None and event.ydata != None:
xRaw,yRaw = event.xdata,event.ydata
if not frozen and xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
clickList.append((x,y))
if len(clickList)==2:
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
weights = makeWeights(sgamma.val)
#path = pathFinder.run(weights, source,target).path(pathType='coordinates')
path = pathFinder.run(weights, source).path(pathType='coordinates',target=target)
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
def freeze(event):
global frozen
frozen=True
def unfreeze(event):
global frozen,clickList
frozen=False
clickList = []
def onslide(event):
global img,gradmag,weights,clickList,sgamma
weights = makeWeights(sgamma.val)
print "onslide",clickList
if len(clickList)>=2:
print "we have path"
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
path = pathFinder.run(weights, source,target).path(pathType='coordinates')
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
bfreeze.on_clicked(freeze)
bunfreeze.on_clicked(unfreeze)
sgamma.on_changed(onslide)
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
| mit |
spallavolu/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    # check that the positive option is passed through to all the estimator
    # classes, tested together in this one function
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
    # option. However, for the middle part (the comparison of coefficient
    # values for a range of alphas) an adaptation was needed. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/core/panel.py | 4 | 55784 | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import numpy as np
import warnings
from pandas.core.dtypes.cast import (
infer_dtype_from_scalar,
cast_scalar_to_array,
maybe_cast_item)
from pandas.core.dtypes.common import (
is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.core.dtypes.missing import notna
import pandas.core.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict)
from pandas.compat.numpy import function as nv
from pandas.core.common import _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_objs_combined_axis)
from pandas.io.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.core.reshape.util import cartesian_product
from pandas.util._decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one "
                                        "of\n%s" %
                                        _shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
# deprecation GH13563
warnings.warn("\nPanel is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of 3-dimensional data are with a "
"MultiIndex on a DataFrame, via the "
"Panel.to_frame() method\n"
"Alternatively, you can use the xarray package "
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
DeprecationWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
values = cast_scalar_to_array([len(x) for x in passed_axes],
data, dtype=dtype)
mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
copy=False)
copy = False
else: # pragma: no cover
raise ValueError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
from collections import defaultdict
orient = orient.lower()
if orient == 'minor':
new_data = defaultdict(OrderedDict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
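    # Illustrative sketch (editorial addition, not part of the pandas source):
    # building a Panel from a dict of DataFrames. With orient='items' (the
    # default) the dict keys become the items axis; with orient='minor' the
    # DataFrame columns do. Assumes numpy/pandas imported as np/pd.
    #
    #   dfs = {'one': pd.DataFrame(np.random.randn(4, 3), columns=list('abc')),
    #          'two': pd.DataFrame(np.random.randn(4, 3), columns=list('abc'))}
    #   wp = pd.Panel.from_dict(dfs)                        # items: one, two
    #   wp_minor = pd.Panel.from_dict(dfs, orient='minor')  # items: a, b, c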
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.loc[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func, try_cast=True):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
pprint_thing(v[0]),
pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
        Get the plane axes names for the given axis: the (index, columns)
        axis names of the DataFrame slices taken along that axis
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
        Get the plane axes for the given axis: the (index, columns)
        Index objects of the DataFrame slices taken along that axis
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
maybe_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
mat = cast_scalar_to_array(shape[1:], value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
            Axis whose labels are dropped. E.g. axis=1 will drop major_axis
            entries having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notna(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
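    # Illustrative sketch (editorial addition): dropping incomplete slices along
    # the major axis. With how='any' a major_axis label is dropped as soon as
    # any value in its items x minor_axis plane is NA; with how='all' only a
    # fully-NA plane is dropped. Assumes numpy/pandas imported as np/pd.
    #
    #   data = np.random.randn(2, 5, 4)
    #   data[0, 1, :] = np.nan
    #   wp = pd.Panel(data)
    #   wp.dropna(axis=1, how='any')  # drops major_axis label 1
    #   wp.dropna(axis=1, how='all')  # keeps it: the plane is not entirely NA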
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func, try_cast=True):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0, try_cast=True):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func, try_cast=True):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
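    # Illustrative sketch (editorial addition): the cross-section helpers each
    # return a single DataFrame slice of the 3-D structure. Assumes
    # wp = pd.Panel(np.random.randn(2, 5, 4), items=['I1', 'I2']).
    #
    #   wp.xs('I1', axis='items')  # same as wp['I1']: major_axis x minor_axis
    #   wp.major_xs(2)             # minor_axis x items
    #   wp.minor_xs(3)             # major_axis x items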
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
            Mapping function for the chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notna(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
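    # Illustrative sketch (editorial addition): to_frame stacks the panel into a
    # long-format DataFrame indexed by a (major, minor) MultiIndex with one
    # column per item; DataFrame.to_panel should round-trip it back.
    #
    #   wp = pd.Panel(np.random.randn(2, 3, 4), items=['a', 'b'])
    #   long_df = wp.to_frame()   # 12 rows (3 x 4) and 2 columns ('a', 'b')
    #   long_df.to_panel()        # back to a 2 x 3 x 4 Panel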
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise ValueError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
        Join items with other Panel, aligning on the major and minor axes
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
            How to handle the indexes of the two objects. Default: 'left'.
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
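    # Illustrative sketch (not from the original source): `p1` and `p2` are
    # hypothetical Panel instances with overlapping item names.
    #
    #     joined = p1.join(p2, lsuffix='_left', rsuffix='_right')  # align on major/minor axes
    #     inner = p1.join(p2, how='inner')                         # keep only shared labels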
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
        dict of aligned results & indices
"""
result = dict()
        # caller may pass a dict or an OrderedDict; preserve the input type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_objs_combined_axis(data.values(), axis=axis,
intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
        # doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
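# Illustrative usage sketch (not part of the original module and never called
# here); it only exercises public methods defined above on a small random Panel.
def _example_panel_usage():
    p = Panel(np.random.randn(2, 5, 4))   # items x major_axis x minor_axis
    counts = p.count(axis='major')        # DataFrame of non-NA counts
    shifted = p.shift(1, axis='major')    # drops the first major label
    transposed = p.transpose(2, 0, 1)     # reorder axes by position
    return counts, shifted, transposed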
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
| gpl-2.0 |
winklerand/pandas | pandas/tests/plotting/test_deprecated.py | 1 | 1535 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
@pytest.mark.slow
def test_scatter_plot_legacy(self):
tm._skip_if_no_scipy()
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@pytest.mark.slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@pytest.mark.slow
def test_radviz_deprecated(self):
df = self.iris
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=df, class_column='Name')
@pytest.mark.slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
pd.plot_params['xaxis.compat'] = True
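# For reference, the non-deprecated entry points for these helpers live in the
# ``pandas.plotting`` namespace, e.g. (illustrative):
#
#     from pandas.plotting import scatter_matrix, radviz
#     scatter_matrix(df)   # no FutureWarning, unlike pandas.tools.plotting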
| bsd-3-clause |
oemof/oemof_examples | oemof_examples/oemof.solph/v0.3.x/start_and_shutdown_costs/startup_shutdown.py | 2 | 2806 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that illustrates how to model startup and shutdown costs attributed
to a binary flow.
Installation requirements
-------------------------
This example requires the version v0.3.x of oemof. Install by:
pip install 'oemof>=0.3,<0.4'
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
import os
import pandas as pd
import oemof.solph as solph
from oemof.network import Node
from oemof.outputlib import processing, views
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# read sequence data
full_filename = os.path.join(os.path.dirname(__file__),
'data.csv')
data = pd.read_csv(full_filename, sep=",")
# select periods
periods = len(data)-1
# create an energy system
idx = pd.date_range('1/1/2017', periods=periods, freq='H')
es = solph.EnergySystem(timeindex=idx)
Node.registry = es
# power bus and components
bel = solph.Bus(label='bel')
demand_el = solph.Sink(
label='demand_el',
inputs={bel: solph.Flow(
fixed=True, actual_value=data['demand_el'], nominal_value=10)})
# pp1 and pp2 are competing to serve overall 12 units load at lowest cost
# summed costs for pp1 = 12 * 10 * 10.25 = 1230
# summed costs for pp2 = 4*5 + 4*5 + 12 * 10 * 10 = 1240
# => pp1 serves the load despite its higher variable costs since
# the start and shutdown costs of pp2 change its marginal costs
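# Illustrative sanity check of the numbers above (plain Python arithmetic, not
# part of the optimisation model itself):
pp1_total_cost = 12 * 10 * 10.25                # 1230.0
pp2_total_cost = 4 * 5 + 4 * 5 + 12 * 10 * 10   # 1240
assert pp1_total_cost < pp2_total_cost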
pp1 = solph.Source(
label='power_plant1',
outputs={bel: solph.Flow(nominal_value=10, variable_costs=10.25)})
# shutdown costs only work in combination with a minimum load: otherwise the
# status variable is "allowed" to stay active, i.e. it can keep a value of one
# permanently, which never triggers the shutdown variable (the shutdown variable
# is set to one only when the status variable changes from one to zero)
pp2 = solph.Source(
label='power_plant2',
outputs={
bel: solph.Flow(
nominal_value=10, min=0.5, max=1.0, variable_costs=10,
nonconvex=solph.NonConvex(startup_costs=5, shutdown_costs=5))})
# create an optimization problem and solve it
om = solph.Model(es)
# debugging
#om.write('problem.lp', io_options={'symbolic_solver_labels': True})
# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})
# create result object
results = processing.results(om)
# plot data
if plt is not None:
# plot electrical bus
data = views.node(results, 'bel')['sequences']
data[(('bel', 'demand_el'), 'flow')] *= -1
columns = [c for c in data.columns
if not any(s in c for s in ['status', 'startup', 'shutdown'])]
data = data[columns]
ax = data.plot(kind='line', drawstyle='steps-post', grid=True, rot=0)
ax.set_xlabel('Hour')
ax.set_ylabel('P (MW)')
plt.show()
| gpl-3.0 |
tudarmstadt-lt/context-eval | dataset2key.py | 2 | 2341 | from pandas import read_csv
import argparse
import codecs
from eval_lib import get_best_id, format_lexsample, FIELD_NAMES, FIELD_TYPES, LIST_SEP, SCORE_SEP
SEMEVAL_SEP = " "
SEMEVAL_SCORE_SEP = "/"
BEST_SENSE = True
BEST_SENSE_WITH_SCORE = False
def format_score(score):
""" Gets '0:-99.65' and returns '0:10.03' """
label, score = score.split(SCORE_SEP)
score = int(-100000.*(1/float(score)))
return "%s/%d" % (label, score)
def convert_dataset2semevalkey(dataset_fpath, output_fpath, no_header=False):
with codecs.open(output_fpath, "w", encoding="utf-8") as output:
if no_header:
df = read_csv(dataset_fpath, sep='\t', encoding='utf8', header=None, names=FIELD_NAMES,
dtype=FIELD_TYPES, doublequote=False, quotechar='\0')
df.target = df.target.astype(str)
else:
df = read_csv(dataset_fpath, encoding='utf-8', delimiter="\t", error_bad_lines=False,
doublequote=False, quotechar='\0')
for i, row in df.iterrows():
if BEST_SENSE:
predicted_senses = get_best_id(unicode(row.predict_sense_ids))
elif BEST_SENSE_WITH_SCORE:
predicted_senses = format_score(get_best_id(unicode(row.predict_sense_ids), output_score=True))
else:
predicted_senses = SEMEVAL_SEP.join(format_score(s) for s in row.predict_sense_ids.split(LIST_SEP))
print >> output, "%s %s %s" % (row.target + "." + row.target_pos, row.context_id, predicted_senses)
print "Key file:", output_fpath
def main():
parser = argparse.ArgumentParser(description='Convert lexical sample dataset to SemEval 2013 key format.')
parser.add_argument('input', help='Path to a file with input lexical sample CSV file (9 columns or more).')
parser.add_argument('output', help='Output file: a SemEval key file with the sense predictions.')
parser.add_argument('--no_header', action='store_true', help='No headers. Default -- false.')
args = parser.parse_args()
print "Input: ", args.input
print "Output: ", args.output
print "No header:", args.no_header
lexsample_9cols_fpath = format_lexsample(args.input)
convert_dataset2semevalkey(lexsample_9cols_fpath, args.output, args.no_header)
if __name__ == '__main__':
main()
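# Example invocation (illustrative; the file names are placeholders):
#
#   python dataset2key.py lexsample_predictions.csv predictions.key --no_header
#
# The resulting key file is in the SemEval 2013 format described above.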
| apache-2.0 |
Eric89GXL/scikit-learn | examples/svm/plot_weighted_samples.py | 69 | 1942 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
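# Optional follow-up (not in the original example): probe how the two fitted
# models differ at one of the heavily re-weighted points.
probe_point = X[15].reshape(1, -1)
print("decision value, unweighted fit:", clf_no_weights.decision_function(probe_point))
print("decision value, weighted fit:  ", clf_weights.decision_function(probe_point))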
| bsd-3-clause |
jangorecki/h2o-3 | h2o-py/h2o/model/metrics_base.py | 1 | 26162 | # -*- encoding: utf-8 -*-
"""
Regression model.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import imp
from h2o.model.confusion_matrix import ConfusionMatrix
from h2o.utils.backward_compatibility import backwards_compatible
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type, assert_satisfies, numeric
class MetricsBase(backwards_compatible()):
"""
A parent class to house common metrics available for the various Metrics types.
The methods here are available across different model categories, and so appear here.
"""
def __init__(self, metric_json, on=None, algo=""):
super(MetricsBase, self).__init__()
# Yep, it's messed up...
if isinstance(metric_json, MetricsBase): metric_json = metric_json._metric_json
self._metric_json = metric_json
# train and valid and xval are not mutually exclusive -- could have a test. train and
# valid only make sense at model build time.
self._on_train = False
self._on_valid = False
self._on_xval = False
self._algo = algo
if on == "training_metrics":
self._on_train = True
elif on == "validation_metrics":
self._on_valid = True
elif on == "cross_validation_metrics":
self._on_xval = True
elif on is None:
pass
else:
raise ValueError("on expected to be train,valid,or xval. Got: " + str(on))
@classmethod
def make(cls, kvs):
"""Factory method to instantiate a MetricsBase object from the list of key-value pairs."""
return cls(metric_json=dict(kvs))
def __repr__(self):
# FIXME !!! __repr__ should never print anything, but return a string
self.show()
return ""
# TODO: convert to actual fields list
def __getitem__(self, key):
return self._metric_json.get(key)
@staticmethod
def _has(dictionary, key):
return key in dictionary and dictionary[key] is not None
def show(self):
"""
Display a short summary of the metrics.
:return: None
"""
metric_type = self._metric_json['__meta']['schema_type']
types_w_glm = ['ModelMetricsRegressionGLM', 'ModelMetricsBinomialGLM']
types_w_clustering = ['ModelMetricsClustering']
types_w_mult = ['ModelMetricsMultinomial']
types_w_bin = ['ModelMetricsBinomial', 'ModelMetricsBinomialGLM']
types_w_r2 = ['ModelMetricsRegressionGLM']
types_w_mean_residual_deviance = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
types_w_mean_absolute_error = ['ModelMetricsRegressionGLM', 'ModelMetricsRegression']
types_w_logloss = types_w_bin + types_w_mult
types_w_dim = ["ModelMetricsGLRM"]
print()
print(metric_type + ": " + self._algo)
reported_on = "** Reported on {} data. **"
if self._on_train:
print(reported_on.format("train"))
elif self._on_valid:
print(reported_on.format("validation"))
elif self._on_xval:
print(reported_on.format("cross-validation"))
else:
print(reported_on.format("test"))
print()
print("MSE: " + str(self.mse()))
print("RMSE: " + str(self.rmse()))
if metric_type in types_w_mean_absolute_error:
print("MAE: " + str(self.mae()))
print("RMSLE: " + str(self.rmsle()))
if metric_type in types_w_r2:
print("R^2: " + str(self.r2()))
if metric_type in types_w_mean_residual_deviance:
print("Mean Residual Deviance: " + str(self.mean_residual_deviance()))
if metric_type in types_w_logloss:
print("LogLoss: " + str(self.logloss()))
if metric_type == 'ModelMetricsBinomial':
# second element for first threshold is the actual mean per class error
print("Mean Per-Class Error: %s" % self.mean_per_class_error()[0][1])
if metric_type == 'ModelMetricsMultinomial':
print("Mean Per-Class Error: " + str(self.mean_per_class_error()))
if metric_type in types_w_glm:
print("Null degrees of freedom: " + str(self.null_degrees_of_freedom()))
print("Residual degrees of freedom: " + str(self.residual_degrees_of_freedom()))
print("Null deviance: " + str(self.null_deviance()))
print("Residual deviance: " + str(self.residual_deviance()))
print("AIC: " + str(self.aic()))
if metric_type in types_w_bin:
print("AUC: " + str(self.auc()))
print("Gini: " + str(self.gini()))
self.confusion_matrix().show()
self._metric_json["max_criteria_and_metric_scores"].show()
if self.gains_lift():
print(self.gains_lift())
if metric_type in types_w_mult:
self.confusion_matrix().show()
self.hit_ratio_table().show()
if metric_type in types_w_clustering:
print("Total Within Cluster Sum of Square Error: " + str(self.tot_withinss()))
print("Total Sum of Square Error to Grand Mean: " + str(self.totss()))
print("Between Cluster Sum of Square Error: " + str(self.betweenss()))
self._metric_json['centroid_stats'].show()
if metric_type in types_w_dim:
print("Sum of Squared Error (Numeric): " + str(self.num_err()))
print("Misclassification Error (Categorical): " + str(self.cat_err()))
def r2(self):
"""The R^2 coefficient."""
return self._metric_json["r2"]
def logloss(self):
"""Log loss."""
return self._metric_json["logloss"]
def nobs(self):
"""
:return: Retrieve the number of observations.
"""
return self._metric_json["nobs"]
def mean_residual_deviance(self):
"""
:return: Retrieve the mean residual deviance for this set of metrics.
"""
return self._metric_json["mean_residual_deviance"]
def auc(self):
"""
:return: Retrieve the AUC for this set of metrics.
"""
return self._metric_json['AUC']
def aic(self):
"""
:return: Retrieve the AIC for this set of metrics.
"""
return self._metric_json['AIC']
def gini(self):
"""Gini coefficient."""
return self._metric_json['Gini']
def mse(self):
"""
:return: Retrieve the MSE for this set of metrics
"""
return self._metric_json['MSE']
def rmse(self):
"""
:return: Retrieve the RMSE for this set of metrics
"""
return self._metric_json['RMSE']
def mae(self):
"""
:return: Retrieve the MAE for this set of metrics
"""
return self._metric_json['mae']
def rmsle(self):
"""
:return: Retrieve the RMSLE for this set of metrics
"""
return self._metric_json['rmsle']
def residual_deviance(self):
"""
:return: the residual deviance if the model has residual deviance, or None if no residual deviance.
"""
if MetricsBase._has(self._metric_json, "residual_deviance"):
return self._metric_json["residual_deviance"]
return None
def residual_degrees_of_freedom(self):
"""
:return: the residual dof if the model has residual deviance, or None if no residual dof.
"""
if MetricsBase._has(self._metric_json, "residual_degrees_of_freedom"):
return self._metric_json["residual_degrees_of_freedom"]
return None
def null_deviance(self):
"""
:return: the null deviance if the model has residual deviance, or None if no null deviance.
"""
if MetricsBase._has(self._metric_json, "null_deviance"):
return self._metric_json["null_deviance"]
return None
def null_degrees_of_freedom(self):
"""
:return: the null dof if the model has residual deviance, or None if no null dof.
"""
if MetricsBase._has(self._metric_json, "null_degrees_of_freedom"):
return self._metric_json["null_degrees_of_freedom"]
return None
def mean_per_class_error(self):
"""
Retrieve the mean per class error.
"""
return self._metric_json['mean_per_class_error']
# Deprecated functions; left here for backward compatibility
_bcim = {
"giniCoef": lambda self, *args, **kwargs: self.gini(*args, **kwargs)
}
class H2ORegressionModelMetrics(MetricsBase):
"""
This class provides an API for inspecting the metrics returned by a regression model.
It is possible to retrieve the R^2 (1 - MSE/variance) and MSE
"""
def __init__(self, metric_json, on=None, algo=""):
super(H2ORegressionModelMetrics, self).__init__(metric_json, on, algo)
class H2OClusteringModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OClusteringModelMetrics, self).__init__(metric_json, on, algo)
def tot_withinss(self):
"""
:return: the Total Within Cluster Sum-of-Square Error, or None if not present.
"""
if MetricsBase._has(self._metric_json, "tot_withinss"):
return self._metric_json["tot_withinss"]
return None
def totss(self):
"""
:return: the Total Sum-of-Square Error to Grand Mean, or None if not present.
"""
if MetricsBase._has(self._metric_json, "totss"):
return self._metric_json["totss"]
return None
def betweenss(self):
"""
:return: the Between Cluster Sum-of-Square Error, or None if not present.
"""
if MetricsBase._has(self._metric_json, "betweenss"):
return self._metric_json["betweenss"]
return None
class H2OMultinomialModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OMultinomialModelMetrics, self).__init__(metric_json, on, algo)
def confusion_matrix(self):
"""
        Returns a confusion matrix based on H2O's default prediction threshold for a dataset.
"""
return self._metric_json['cm']['table']
def hit_ratio_table(self):
"""
Retrieve the Hit Ratios
"""
return self._metric_json['hit_ratio_table']
class H2OBinomialModelMetrics(MetricsBase):
"""
This class is essentially an API for the AUC object.
This class contains methods for inspecting the AUC for different criteria.
To input the different criteria, use the static variable `criteria`
"""
def __init__(self, metric_json, on=None, algo=""):
"""
Create a new Binomial Metrics object (essentially a wrapper around some json)
:param metric_json: A blob of json holding all of the needed information
        :param on: which data the metrics were computed on: "training_metrics",
            "validation_metrics", "cross_validation_metrics", or None (test data)
:param algo: The algorithm the metrics are based off of (e.g. deeplearning, gbm, etc.)
:return: A new H2OBinomialModelMetrics object.
"""
super(H2OBinomialModelMetrics, self).__init__(metric_json, on, algo)
def F1(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The F1 for the given set of thresholds.
"""
return self.metric("f1", thresholds=thresholds)
def F2(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The F2 for this set of metrics and thresholds
"""
return self.metric("f2", thresholds=thresholds)
def F0point5(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The F0point5 for this set of metrics and thresholds.
"""
return self.metric("f0point5", thresholds=thresholds)
def accuracy(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The accuracy for this set of metrics and thresholds
"""
return self.metric("accuracy", thresholds=thresholds)
def error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The error for this set of metrics and thresholds.
"""
return 1 - self.metric("accuracy", thresholds=thresholds)
def precision(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The precision for this set of metrics and thresholds.
"""
return self.metric("precision", thresholds=thresholds)
def tpr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
        :return: The True Positive Rate
"""
return self.metric("tpr", thresholds=thresholds)
def tnr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The True Negative Rate
"""
return self.metric("tnr", thresholds=thresholds)
def fnr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The False Negative Rate
"""
return self.metric("fnr", thresholds=thresholds)
def fpr(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The False Positive Rate
"""
return self.metric("fpr", thresholds=thresholds)
def recall(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Recall for this set of metrics and thresholds
"""
return self.metric("tpr", thresholds=thresholds)
def sensitivity(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Sensitivity or True Positive Rate for this set of metrics and thresholds
"""
return self.metric("tpr", thresholds=thresholds)
def fallout(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The fallout or False Positive Rate for this set of metrics and thresholds
"""
return self.metric("fpr", thresholds=thresholds)
def missrate(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
        :return: The miss rate or False Negative Rate.
"""
return self.metric("fnr", thresholds=thresholds)
def specificity(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The specificity or True Negative Rate.
"""
return self.metric("tnr", thresholds=thresholds)
def mcc(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: The absolute MCC (a value between 0 and 1, 0 being totally dissimilar, 1 being identical)
"""
return self.metric("absolute_mcc", thresholds=thresholds)
def max_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Return 1 - min_per_class_accuracy
"""
return 1 - self.metric("min_per_class_accuracy", thresholds=thresholds)
def mean_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:return: Return mean_per_class_error
"""
return [[x[0], 1 - x[1]] for x in self.metric("mean_per_class_accuracy", thresholds=thresholds)]
def metric(self, metric, thresholds=None):
"""
:param metric: The desired metric
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then
the thresholds in this set of metrics will be used.
:return: The set of metrics for the list of thresholds
"""
assert_is_type(thresholds, None, [numeric])
if not thresholds: thresholds = [self.find_threshold_by_max_metric(metric)]
thresh2d = self._metric_json['thresholds_and_metric_scores']
metrics = []
for t in thresholds:
idx = self.find_idx_by_threshold(t)
metrics.append([t, thresh2d[metric][idx]])
return metrics
def plot(self, type="roc", server=False):
"""
Produce the desired metric plot
:param type: the type of metric plot (currently, only ROC supported)
        :param server: if True, use the non-interactive 'Agg' backend and do not call the (blocking) matplotlib show method.
:return: None
"""
# TODO: add more types (i.e. cutoffs)
assert_is_type(type, "roc")
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if server: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib is required for this function!")
return
if type == "roc":
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(self._metric_json["AUC"]))
plt.plot(self.fprs, self.tprs, 'b--')
plt.axis([0, 1, 0, 1])
if not server: plt.show()
@property
def fprs(self):
"""
Return all false positive rates for all threshold values.
:return: a list of false positive rates.
"""
return self._metric_json["thresholds_and_metric_scores"]["fpr"]
@property
def tprs(self):
"""
Return all true positive rates for all threshold values.
:return: a list of true positive rates.
"""
return self._metric_json["thresholds_and_metric_scores"]["tpr"]
def confusion_matrix(self, metrics=None, thresholds=None):
"""
Get the confusion matrix for the specified metric
        :param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_mcc", "precision", "recall",
            "specificity", "accuracy", "f0point5", "f2", "f1", "mean_per_class_accuracy"}
:param thresholds: A value (or list of values) between 0 and 1
:return: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix
(if there is only one)
"""
# make lists out of metrics and thresholds arguments
if metrics is None and thresholds is None: metrics = ["f1"]
if isinstance(metrics, list):
metrics_list = metrics
elif metrics is None:
metrics_list = []
else:
metrics_list = [metrics]
if isinstance(thresholds, list):
thresholds_list = thresholds
elif thresholds is None:
thresholds_list = []
else:
thresholds_list = [thresholds]
# error check the metrics_list and thresholds_list
assert_is_type(thresholds_list, [numeric])
assert_satisfies(thresholds_list, all(0 <= t <= 1 for t in thresholds_list))
if not all(m.lower() in ["min_per_class_accuracy", "absolute_mcc", "precision", "recall", "specificity", "accuracy",
"f0point5", "f2", "f1", "mean_per_class_accuracy"] for m in metrics_list):
raise ValueError(
"The only allowable metrics are min_per_class_accuracy, absolute_mcc, precision, accuracy, f0point5, "
"f2, f1, mean_per_class_accuracy")
# make one big list that combines the thresholds and metric-thresholds
metrics_thresholds = [self.find_threshold_by_max_metric(m) for m in metrics_list]
for mt in metrics_thresholds:
thresholds_list.append(mt)
thresh2d = self._metric_json['thresholds_and_metric_scores']
actual_thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
cms = []
for t in thresholds_list:
idx = self.find_idx_by_threshold(t)
row = thresh2d.cell_values[idx]
tns = row[11]
fns = row[12]
fps = row[13]
tps = row[14]
p = tps + fns
n = tns + fps
c0 = n - fps
c1 = p - tps
if t in metrics_thresholds:
m = metrics_list[metrics_thresholds.index(t)]
table_header = "Confusion Matrix (Act/Pred) for max " + m + " @ threshold = " + str(
actual_thresholds[idx])
else:
table_header = "Confusion Matrix (Act/Pred) @ threshold = " + str(actual_thresholds[idx])
cms.append(ConfusionMatrix(cm=[[c0, fps], [c1, tps]], domains=self._metric_json['domain'],
table_header=table_header))
if len(cms) == 1:
return cms[0]
else:
return cms
def find_threshold_by_max_metric(self, metric):
"""
:param metric: A string in {"min_per_class_accuracy", "absolute_mcc", "precision", "recall", "specificity", "accuracy", "f0point5", "f2", "f1", "mean_per_class_accuracy"}
:return: the threshold at which the given metric is maximum.
"""
crit2d = self._metric_json['max_criteria_and_metric_scores']
for e in crit2d.cell_values:
if e[0] == "max " + metric.lower():
return e[1]
raise ValueError("No metric " + str(metric.lower()))
def find_idx_by_threshold(self, threshold):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
:param threshold: Find the index of this input threshold.
:return: Return the index or throw a ValueError if no such index can be found.
"""
assert_is_type(threshold, numeric)
thresh2d = self._metric_json['thresholds_and_metric_scores']
for i, e in enumerate(thresh2d.cell_values):
t = float(e[0])
if abs(t - threshold) < 0.00000001 * max(t, threshold):
return i
if threshold >= 0 and threshold <= 1:
thresholds = [float(e[0]) for i, e in enumerate(thresh2d.cell_values)]
threshold_diffs = [abs(t - threshold) for t in thresholds]
closest_idx = threshold_diffs.index(min(threshold_diffs))
closest_threshold = thresholds[closest_idx]
print("Could not find exact threshold {0}; using closest threshold found {1}." \
.format(threshold, closest_threshold))
return closest_idx
raise ValueError("Threshold must be between 0 and 1, but got {0} ".format(threshold))
def gains_lift(self):
"""
Retrieve the Gains/Lift table
"""
if 'gains_lift_table' in self._metric_json:
return self._metric_json['gains_lift_table']
return None
class H2OAutoEncoderModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2OAutoEncoderModelMetrics, self).__init__(metric_json, on, algo)
class H2ODimReductionModelMetrics(MetricsBase):
def __init__(self, metric_json, on=None, algo=""):
super(H2ODimReductionModelMetrics, self).__init__(metric_json, on, algo)
def num_err(self):
"""
:return: the Sum of Squared Error over non-missing numeric entries, or None if not present.
"""
if MetricsBase._has(self._metric_json, "numerr"):
return self._metric_json["numerr"]
return None
def cat_err(self):
"""
:return: the Number of Misclassified categories over non-missing categorical entries, or None if not present.
"""
if MetricsBase._has(self._metric_json, "caterr"):
return self._metric_json["caterr"]
return None
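# Typical usage (illustrative sketch, assuming a running H2O cluster, a trained
# binomial model `model` and a test H2OFrame `test`):
#
#     perf = model.model_performance(test_data=test)     # H2OBinomialModelMetrics
#     perf.auc()                                         # area under the ROC curve
#     perf.F1()                                          # [[threshold, F1]] at the max-F1 threshold
#     perf.confusion_matrix(metrics=["f1", "accuracy"])  # one ConfusionMatrix per metric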
| apache-2.0 |
OpenUpgrade-dev/OpenUpgrade | addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
#pad worktime slots of calendar (all days should be equally long)
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
        # working time should take up 2/3 of the padded (equal-length) day
sum_time = 3 * max_work_time / 2
#now create timeslots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(cbook.flatten(slots))
slots = zip(slots[:-1], slots[1:])
#balance non working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pratapvardhan/scikit-learn | sklearn/datasets/lfw.py | 31 | 19544 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
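# A minimal usage sketch (illustrative only; it assumes the LFW files are
# already cached locally, or that downloading them is acceptable):
#
#     pairs_train = fetch_lfw_pairs(subset='train')
#     X, y = pairs_train.data, pairs_train.target
#     # with the default slice_/resize: X.shape == (2200, 5828), y.shape == (2200,)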
| bsd-3-clause |
soulmachine/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed-form formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# Compute the negative log-likelihood under the ground-truth model, which we
# would not have access to in real settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
steelee/fishbowl-notebooks | ipython/profile_nbserver/ipython_notebook_config.py | 2 | 19996 | # Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = []
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
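# Example (illustrative sketch only; the path below is a placeholder): keep the
# secret in a separate file outside version control and read it here instead of
# hard-coding it in this config.
# with open('/path/to/notebook_cookie_secret', 'rb') as f:
#     c.NotebookApp.cookie_secret = f.read().strip()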
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The port the notebook server will listen on.
c.NotebookApp.port = 80
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# The kernel spec manager class to use. Should be a subclass of
# `IPython.kernel.kernelspec.KernelSpecManager`.
#
# The API of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'IPython.kernel.kernelspec.KernelSpecManager'>
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'IPython.html.services.contents.filemanager.FileContentsManager'>
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
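# Example (hypothetical pattern -- adjust to your own domain):
# c.NotebookApp.allow_origin_pat = r'https://.*\.example\.org'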
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'IPython.html.auth.logout.LogoutHandler'>
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'IPython.html.services.sessions.sessionmanager.SessionManager'>
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'/notebooks'
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'IPython.html.services.kernels.kernelmanager.MappingKernelManager'>
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = u''
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
#
# c.NotebookApp.file_to_run = ''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u''
# The cluster manager class to use.
# c.NotebookApp.cluster_manager_class = <class 'IPython.html.services.clusters.clustermanager.ClusterManager'>
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
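# Example (placeholder shown -- paste the hash produced by the snippet above):
# c.NotebookApp.password = u'sha1:<salt>:<hashed-password>'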
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Whether or not to trust the X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from IPython.html.templates.
# c.NotebookApp.extra_template_paths = []
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'IPython.html.services.config.manager.ConfigManager'>
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'IPython.html.auth.login.LoginHandler'>
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
#
# c.KernelManager.transport = 'tcp'
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'root'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for signing messages.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The name of the default kernel to start
# c.MappingKernelManager.default_kernel_name = 'python2'
#
# c.MappingKernelManager.root_dir = u''
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
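# Example (illustrative sketch, assuming nbformat v4 notebook models): strip
# code-cell outputs before the notebook is written to disk.
# def scrub_output_pre_save(path, model, contents_manager):
#     if model['type'] != 'notebook' or model['content'].get('nbformat') != 4:
#         return
#     for cell in model['content']['cells']:
#         if cell['cell_type'] == 'code':
#             cell['outputs'] = []
#             cell['execution_count'] = None
# c.ContentsManager.pre_save_hook = scrub_output_pre_save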
#
# c.ContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = [u'__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
#
# c.ContentsManager.checkpoints = None
#
# c.ContentsManager.checkpoints_kwargs = {}
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# FileContentsManager will inherit config from: ContentsManager
#
# c.FileContentsManager.root_dir = u''
# The base name used when creating untitled files.
# c.FileContentsManager.untitled_file = 'untitled'
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
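# Example (illustrative sketch; assumes the `ipython nbconvert` command is
# available on the PATH): export a .py script next to every saved notebook.
# import os
# from subprocess import check_call
# def script_post_save(os_path, model, contents_manager, **kwargs):
#     if model['type'] != 'notebook':
#         return
#     folder, fname = os.path.split(os_path)
#     check_call(['ipython', 'nbconvert', '--to', 'script', fname], cwd=folder)
# c.FileContentsManager.post_save_hook = script_post_save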
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.FileContentsManager.pre_save_hook = None
#
# c.FileContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.FileContentsManager.hide_globs = [u'__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# The base name used when creating untitled notebooks.
# c.FileContentsManager.untitled_notebook = 'Untitled'
# The base name used when creating untitled directories.
# c.FileContentsManager.untitled_directory = 'Untitled Folder'
#
# c.FileContentsManager.checkpoints = None
#
# c.FileContentsManager.checkpoints_kwargs = {}
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your IPython profile. You can set it to ':memory:' to disable sqlite
# writing to the filesystem.
# c.NotebookNotary.db_file = u''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set([])
| mit |
mirestrepo/voxels-at-lems | registration_eval/different_days/dd_compute_dense_transformation_error.py | 1 | 2773 | #!/usr/bin/env python
# encoding: utf-8
"""
compute_transformation_error.py
Created by Maria Isabel Restrepo on 2012-09-24.
Copyright (c) 2012 . All rights reserved.
This script computes the distance between an estimated similarity transformation and its ground truth.
The transformation is used to transform a "source" coordinate system into a "target" coordinate system.
To compute the error between the translations, the L2 norm of the difference of the translation vectors in the
"source" coordinate system is computed. Since distances are preserved under R and T, only scale is applied.
The rotation error is computed as the half angle between the normalized quaternions, i.e. acos(|<q1,q2>|) in [0, pi/2].
This script was intended to be used with Vishal's results.
"""
import os
import sys
import logging
import argparse
from vpcl_adaptor import *
import numpy as np
from numpy import linalg as LA
import transformations as tf
import math
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
import reg3d
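# Illustrative sketch (hypothetical helper, not part of reg3d): the rotation error
# described in the docstring can be computed from two 4x4 transforms, assuming the
# bundled transformations module provides quaternion_from_matrix:
#     q_est = tf.quaternion_from_matrix(H_est)
#     q_gt = tf.quaternion_from_matrix(H_gt)
#     cos_half = abs(np.dot(q_est / LA.norm(q_est), q_gt / LA.norm(q_gt)))
#     rot_error = math.acos(min(1.0, cos_half))  # half angle, in [0, pi/2]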
if __name__ == '__main__':
# fname = "/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/2011-2006_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/2011-2006_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/capitol_2006/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 1.39523511977e-06 0.802221070301 2.98789826592
# fname = "/Users/isa/Dropbox/data/registration_for_vj/downtown_2006/original/2006-2011_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/downtown_2006/original/2006-2011_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 5.31970689721e-08 0.808909241082 4.83449482984
# fname = "/Users/isa/Dropbox/data/registration_for_vj/BH_VSI/original/f4-2006_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/BH_VSI/original/f4-2006_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/BH_2006/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 2.57980939389e-07 0.763324882652 4.79257669203 | bsd-2-clause |
nilsgrabbert/spark | python/setup.py | 12 | 9667 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark, otherwise we
# want to use the symlink farm. And if the symlink farm exists while we are under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas>=0.13.0']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
kai-lu/Beginning | day_time_through_a_year.py | 1 | 1317 | # -*- coding: utf-8 -*-
# Calculate daylight time during the whole year
# Kai Lu
import numpy as np
#import scipy as sp
import matplotlib.pyplot as plt
day = np.arange(1,366,1)
# days in a year, start from the autumnal equinox
phi = day*2*np.pi/max(day)
# spanning angle from the current sun to the autumnal-equinox sun
alpha0 = 23.5*np.pi/180
# latitude of the Tropic of Cancer
alpha = np.arcsin(np.sin(phi)*np.sin(alpha0))
# the angle of current sunshine off the equator
theta = 59.3*np.pi/180
# 59.3 is the latitude of Stockholm
beta = 2*np.arccos(np.tan(theta)*np.tan(alpha))
# instant daylight angle on a circle of latitude
T = 12*beta/np.pi
# daylight length in a day
fig = plt.figure(1)
plt.figure(figsize=(16/2, 9/2))
plt.plot(day, T, "g.")
plt.grid(True)
plt.xlim((0, 365))
plt.xlabel('Days since the autumnal equinox')
plt.ylabel('Daylight length (Hours)')
plt.title('Daylight length through the whole year')
plt.show()
# shows the plot
T0 = np.append(T[364],T[:364])
dT = (T - T0)*60
# daylight difference between two consecutive days, in minutes
fig = plt.figure(2)
plt.figure(figsize=(16/2, 9/2))
plt.plot(day, dT, "b-")
plt.grid(True)
plt.xlim((0, 365))
plt.xlabel('Days since the autumnal equinox')
plt.ylabel('Daylight increment (Minutes)')
plt.title('Increment of daylight through the whole year')
plt.show() | gpl-3.0 |
notbeloser/Polo_auto_trade | Loop_to_test_plan.py | 1 | 3413 | from poloniex import Poloniex
from time import time
import sys
import numpy as np
import matplotlib.dates as md
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
from matplotlib.dates import DateFormatter, WeekdayLocator,DayLocator, MONDAY
from colorama import Fore, Back, Style
from datetime import datetime
from math import pi
import pandas as pd
pd.set_option('display.width', 300)
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
polo = Poloniex('GBC146G1-M9RGA0VT-T5FL729B-P8OTN6SU',
'a4d44e8e4e4432e9a9a94d66fb17a7b7081858aaeb85c0fdd9b6ebf8a51a7d2fa0160c5db0e55b8d836ba6d64b1c0e324eba164b94278617edd2eec48c09acb7',jsonNums=float)
coin = "BTC_MAID"
period = polo.MINUTE * 5
out=[]
df=pd.DataFrame(polo.returnChartData(coin,period,time()-polo.DAY*10))
df['date'] = df['date'] + polo.DAY / 3 # shift time to UTC+8
df['date'] = pd.to_datetime(df["date"], unit='s')
window_short = 8
window_long = 6
SDP = 0.262626
SDN= -0.232323
index = 0
print(coin)
# for SDN in np.linspace(-1,0,101):
# for SDP in np.linspace(0,1,101):
# for window_short in range(3,11):
# for window_long in range(3,21):
df['short'] = pd.ewma(df['weightedAverage'], com=window_short)
df['long'] = pd.rolling_mean(df['weightedAverage'], window=window_long)
df['short_diff'] = df['short'].diff() / df['short'] * 100
df['long_diff'] = df['long'].diff() / df['long'] * 100
df['SD'] = (df.short - df.long) / df.long * 100
df['SD_diff'] = df['SD'].diff()
df['buy'] = df.SD > SDP
df['sell'] = df.SD < SDN
df['bs'] = df.buy != df.sell
trade_index = df[df['bs'] == True].index.tolist()
df.dropna(inplace=True)
df['trade'] = pd.DataFrame.diff(df.buy[trade_index]*1 + df.sell[trade_index]*-1)
df['trade'].fillna(0,inplace=True)
# df=df.drop(['buy','sell','bs'],axis=1)
print_full(df)
print("max SD %f"%max(df['SD']))
print("min SD %f"%min(df['SD']))
print("mean SD %f" %np.mean(df['SD']))
df_index=df.index.tolist()
#test profit
BTC = 1
fee = 0
trade_state = 0 #0 is buy 1 is sell
last_price = 0
trade_time=0
win =0
lose =0
for i in df_index:
if df.trade[i] == -2 :
if last_price>0:
fee = df['close'][i]/ last_price * BTC * 0.0025 +fee
BTC = df['close'][i]/ last_price * BTC * 0.9975
if last_price < df['close'][i]*0.9975 :
win = win+1
else:
lose = lose +1
last_price = df['close'][i]
trade_time=trade_time+1
elif df.trade[i] == 2 :
if last_price>0:
if last_price*0.9975 > df['close'][i] :
win = win+1
else:
lose = lose +1
fee = last_price / df['close'][i] * BTC * 0.0025 + fee
BTC = last_price / df['close'][i] * BTC * 0.9975
last_price = df['close'][i]
trade_time=trade_time+1
try:
win_rate = win / trade_time * 100
except:
win_rate = 0
print("BTC %f fee %f trade time %d win %d lose %d,win rate %f,SDN %f,SDP %f,window short %d,window long %d"%(BTC,fee,trade_time,win,lose,win_rate,SDN,SDP,window_short,window_long))
out.append([BTC,fee,trade_time,win,lose,win_rate,SDN,SDP,window_short,window_long])
out_df = pd.DataFrame(out,columns=["BTC","fee","trade time","win","lose","win rate","SDN","SDP","window short","window long"])
out_df = out_df.sort_values('BTC',ascending=False)
print_full(out_df) | gpl-2.0 |
kmike/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 4 | 4625 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
k=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['minibatchkmeans_speed'].append(delta)
results['minibatchkmeans_quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['minibatchkmeans_speed'].append(delta)
results['minibatchkmeans_quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure()
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
mclevey/seaborn | seaborn/matrix.py | 5 | 40890 | """Functions to visualize matrices of data."""
import itertools
import colorsys
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy.cluster import hierarchy
from .axisgrid import Grid
from .palettes import cubehelix_palette
from .utils import despine, axis_ticklabels_overlap
from .external.six.moves import range
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(str, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(str, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
"""Convert either a list of colors or nested lists of colors to RGB."""
to_rgb = mpl.colors.colorConverter.to_rgb
try:
to_rgb(colors[0])
# If this works, there is only one level of colors
return list(map(to_rgb, colors))
except ValueError:
# If we get here, we have nested lists
return [list(map(to_rgb, l)) for l in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatabile and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, np.bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=np.bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
if not (mask.index.equals(data.index) and
        mask.columns.equals(data.columns)):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
class _HeatMapper(object):
"""Draw a heatmap plot of a matrix with nice labels and colormaps."""
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
"""Initialize the plotting object."""
# We always want to have a DataFrame with semantic information
# and an ndarray to pass to matplotlib
if isinstance(data, pd.DataFrame):
plot_data = data.values
else:
plot_data = np.asarray(data)
data = pd.DataFrame(plot_data)
# Validate the mask and convert to DataFrame
mask = _matrix_mask(data, mask)
# Reverse the rows so the plot looks like the matrix
plot_data = plot_data[::-1]
data = data.ix[::-1]
mask = mask.ix[::-1]
plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
# Get good names for the rows and columns
xtickevery = 1
if isinstance(xticklabels, int) and xticklabels > 1:
xtickevery = xticklabels
xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and xticklabels:
xticklabels = _index_to_ticklabels(data.columns)
elif isinstance(xticklabels, bool) and not xticklabels:
xticklabels = ['' for _ in range(data.shape[1])]
ytickevery = 1
if isinstance(yticklabels, int) and yticklabels > 1:
ytickevery = yticklabels
yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and yticklabels:
yticklabels = _index_to_ticklabels(data.index)
elif isinstance(yticklabels, bool) and not yticklabels:
yticklabels = ['' for _ in range(data.shape[0])]
else:
yticklabels = yticklabels[::-1]
# Get the positions and used label for the ticks
nx, ny = data.T.shape
xstart, xend, xstep = 0, nx, xtickevery
self.xticks = np.arange(xstart, xend, xstep) + .5
self.xticklabels = xticklabels[xstart:xend:xstep]
ystart, yend, ystep = (ny - 1) % ytickevery, ny, ytickevery
self.yticks = np.arange(ystart, yend, ystep) + .5
self.yticklabels = yticklabels[ystart:yend:ystep]
# Get good names for the axis labels
xlabel = _index_to_label(data.columns)
ylabel = _index_to_label(data.index)
self.xlabel = xlabel if xlabel is not None else ""
self.ylabel = ylabel if ylabel is not None else ""
# Determine good default values for the colormapping
self._determine_cmap_params(plot_data, vmin, vmax,
cmap, center, robust)
# Save other attributes to the object
self.data = data
self.plot_data = plot_data
self.annot = annot
self.fmt = fmt
self.annot_kws = {} if annot_kws is None else annot_kws
self.cbar = cbar
self.cbar_kws = {} if cbar_kws is None else cbar_kws
def _determine_cmap_params(self, plot_data, vmin, vmax,
cmap, center, robust):
"""Use some heuristics to set good defaults for colorbar and range."""
calc_data = plot_data.data[~np.isnan(plot_data.data)]
if vmin is None:
vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
if vmax is None:
vmax = np.percentile(calc_data, 98) if robust else calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
self.divergent = divergent
# Now add in the centering value and set the limits
vmin += center
vmax += center
self.vmin = vmin
self.vmax = vmax
# Choose default colormaps if not provided
if cmap is None:
if divergent:
self.cmap = "RdBu_r"
else:
self.cmap = cubehelix_palette(light=.95, as_cmap=True)
else:
self.cmap = cmap
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, val, color in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors()):
if val is not np.ma.masked:
_, l, _ = colorsys.rgb_to_hls(*color[:3])
text_color = ".15" if l > .5 else "w"
val = ("{:" + self.fmt + "}").format(val)
ax.text(x, y, val, color=text_color,
ha="center", va="center", **self.annot_kws)
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
ax.set(xticks=self.xticks, yticks=self.yticks)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
# Possibly add a colorbar
if self.cbar:
ticker = mpl.ticker.MaxNLocator(6)
cb = ax.figure.colorbar(mesh, cax, ax,
ticks=ticker, **self.cbar_kws)
cb.outline.set_linewidth(0)
def heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=False, fmt=".2g", annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, ax=None, xticklabels=True, yticklabels=True,
mask=None,
**kwargs):
"""Plot rectangular data as a color-encoded matrix.
This function tries to infer a good colormap to use from the data, but
this is not guaranteed to work, so take care to make sure the kind of
colormap (sequential or diverging) and its limits are appropriate.
This is an Axes-level function and will draw the heatmap into the
currently-active Axes if none is provided to the ``ax`` argument. Part of
this Axes space will be taken and used to plot a colormap, unless ``cbar``
is False or a separate Axes is provided to ``cbar_ax``.
Parameters
----------
data : rectangular dataset
2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
is provided, the index/column information will be used to label the
columns and rows.
vmin, vmax : floats, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
one of these values may be ignored.
cmap : matplotlib colormap name or object, optional
The mapping from data values to color space. If not provided, this
will be either a cubehelix map (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging dataset).
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with robust quantiles instead of the extreme values.
annot : bool, optional
If True, write the data value in each cell.
fmt : string, optional
String formatting code to use when ``annot`` is True.
annot_kws : dict of key, value mappings, optional
Keyword arguments for ``ax.text`` when ``annot`` is True.
linewidths : float, optional
Width of the lines that will divide each cell.
linecolor : color, optional
Color of the lines that will divide each cell.
cbar : boolean, optional
Whether to draw a colorbar.
cbar_kws : dict of key, value mappings, optional
Keyword arguments for `fig.colorbar`.
cbar_ax : matplotlib Axes, optional
Axes in which to draw the colorbar, otherwise take space from the
main Axes.
square : boolean, optional
If True, set the Axes aspect to "equal" so each cell will be
square-shaped.
ax : matplotlib Axes, optional
Axes in which to draw the plot, otherwise use the currently-active
Axes.
xticklabels : list-like, int, or bool, optional
If True, plot the column names of the dataframe. If False, don't plot
the column names. If list-like, plot these alternate labels as the
xticklabels. If an integer, use the column names but plot only every
n label.
yticklabels : list-like, int, or bool, optional
If True, plot the row names of the dataframe. If False, don't plot
the row names. If list-like, plot these alternate labels as the
yticklabels. If an integer, use the index names but plot only every
n label.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked.
kwargs : other keyword arguments
All other keyword arguments are passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes object with the heatmap.
Examples
--------
Plot a heatmap for a numpy array:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(0)
>>> import seaborn as sns; sns.set()
>>> uniform_data = np.random.rand(10, 12)
>>> ax = sns.heatmap(uniform_data)
Change the limits of the colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
Plot a heatmap for data centered on 0:
.. plot::
:context: close-figs
>>> normal_data = np.random.randn(10, 12)
>>> ax = sns.heatmap(normal_data)
Plot a dataframe with meaningful row and column labels:
.. plot::
:context: close-figs
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> ax = sns.heatmap(flights)
Annotate each cell with the numeric value using integer formatting:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, annot=True, fmt="d")
Add lines between each cell:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, linewidths=.5)
Use a different colormap:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cmap="YlGnBu")
Center the colormap at a specific value:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, center=flights.loc["January", 1955])
Plot every other column label and don't plot row labels:
.. plot::
:context: close-figs
>>> data = np.random.randn(50, 20)
>>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
Don't draw a colorbar:
.. plot::
:context: close-figs
>>> ax = sns.heatmap(flights, cbar=False)
Use different axes for the colorbar:
.. plot::
:context: close-figs
>>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
>>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
>>> ax = sns.heatmap(flights, ax=ax,
... cbar_ax=cbar_ax,
... cbar_kws={"orientation": "horizontal"})
    Use a mask to plot only part of a matrix:
.. plot::
:context: close-figs
>>> corr = np.corrcoef(np.random.randn(10, 200))
>>> mask = np.zeros_like(corr)
>>> mask[np.triu_indices_from(mask)] = True
>>> with sns.axes_style("white"):
... ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
"""
# Initialize the plotter object
plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws, xticklabels, yticklabels,
mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
class _DendrogramPlotter(object):
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
if self.rotate:
self.X = self.dendrogram['dcoord']
self.Y = self.dendrogram['icoord']
else:
self.X = self.dendrogram['icoord']
self.Y = self.dendrogram['dcoord']
def _calculate_linkage_scipy(self):
if np.product(self.shape) >= 10000:
            import warnings  # local import; the module-level imports may not include it
            warnings.warn('This will be slow... (gentle suggestion: '
                          '"pip install fastcluster")', UserWarning)
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = hierarchy.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
pairwise_dists = distance.pdist(self.array, metric=self.metric)
linkage = fastcluster.linkage(pairwise_dists, method=self.method)
del pairwise_dists
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
Made a separate function, not a property because don't want to
recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
            .dendrogram. The important key-value pairing is
            "leaves", which indicates the re-ordering of the matrix and is
            exposed here through the ``reordered_ind`` property.
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_list=['k'], color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
for x, y in zip(self.X, self.Y):
ax.plot(x, y, color='k', linewidth=.5)
if self.rotate and self.axis == 0:
ax.invert_xaxis()
ax.yaxis.set_ticks_position('right')
ymax = min(map(min, self.Y)) + max(map(max, self.Y))
ax.set_ylim(0, ymax)
ax.invert_yaxis()
else:
xmax = min(map(min, self.X)) + max(map(max, self.X))
ax.set_xlim(0, xmax)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
plt.draw()
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(data, linkage=None, axis=1, label=True, metric='euclidean',
method='average', rotate=False, ax=None):
"""Draw a tree diagram of relationships within a matrix
Parameters
----------
data : pandas.DataFrame
Rectangular data
linkage : numpy.array, optional
Linkage matrix
axis : int, optional
Which axis to use to calculate linkage. 0 is rows, 1 is columns.
label : bool, optional
If True, label the dendrogram at leaves with column or row names
metric : str, optional
Distance metric. Anything valid for scipy.spatial.distance.pdist
method : str, optional
Linkage method to use. Anything valid for
scipy.cluster.hierarchy.linkage
rotate : bool, optional
When plotting the matrix, whether to rotate it 90 degrees
counter-clockwise, so the leaves face right
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis
Returns
-------
dendrogramplotter : _DendrogramPlotter
A Dendrogram plotter object.
Notes
-----
Access the reordered dendrogram indices with
dendrogramplotter.reordered_ind
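    Examples
    --------
    A minimal usage sketch (an assumption: the function is imported from
    ``seaborn.matrix``; adjust the import to wherever it lives in your
    install):
    .. plot::
        :context: close-figs
        >>> import seaborn as sns; sns.set()
        >>> from seaborn.matrix import dendrogram  # assumed import path
        >>> flights = sns.load_dataset("flights")
        >>> flights = flights.pivot("month", "year", "passengers")
        >>> den = dendrogram(flights, axis=0, rotate=True)
        >>> row_order = den.reordered_ind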
"""
plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,
metric=metric, method=method,
label=label, rotate=rotate)
if ax is None:
ax = plt.gca()
return plotter.plot(ax=ax)
class ClusterGrid(Grid):
def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
figsize=None, row_colors=None, col_colors=None, mask=None):
"""Grid object for organizing clustered heatmap input on to axes"""
if isinstance(data, pd.DataFrame):
self.data = data
else:
self.data = pd.DataFrame(data)
self.data2d = self.format_data(self.data, pivot_kws, z_score,
standard_scale)
self.mask = _matrix_mask(self.data2d, mask)
if figsize is None:
width, height = 10, 10
figsize = (width, height)
self.fig = plt.figure(figsize=figsize)
if row_colors is not None:
row_colors = _convert_colors(row_colors)
self.row_colors = row_colors
if col_colors is not None:
col_colors = _convert_colors(col_colors)
self.col_colors = col_colors
width_ratios = self.dim_ratios(self.row_colors,
figsize=figsize,
axis=1)
height_ratios = self.dim_ratios(self.col_colors,
figsize=figsize,
axis=0)
nrows = 3 if self.col_colors is None else 4
ncols = 3 if self.row_colors is None else 4
self.gs = gridspec.GridSpec(nrows, ncols, wspace=0.01, hspace=0.01,
width_ratios=width_ratios,
height_ratios=height_ratios)
self.ax_row_dendrogram = self.fig.add_subplot(self.gs[nrows - 1, 0:2],
axisbg="white")
self.ax_col_dendrogram = self.fig.add_subplot(self.gs[0:2, ncols - 1],
axisbg="white")
self.ax_row_colors = None
self.ax_col_colors = None
if self.row_colors is not None:
self.ax_row_colors = self.fig.add_subplot(
self.gs[nrows - 1, ncols - 2])
if self.col_colors is not None:
self.ax_col_colors = self.fig.add_subplot(
self.gs[nrows - 2, ncols - 1])
self.ax_heatmap = self.fig.add_subplot(self.gs[nrows - 1, ncols - 1])
# colorbar for scale to left corner
self.cax = self.fig.add_subplot(self.gs[0, 0])
self.dendrogram_row = None
self.dendrogram_col = None
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
specified axis.
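        A small example (a sketch; it assumes pandas' default ddof=1
        standard deviation, so the column mean is 2 and its std is 1):
        >>> import pandas as pd
        >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
        >>> ClusterGrid.z_score(df, axis=1)["a"].tolist()
        [-1.0, 0.0, 1.0]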
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
        Returns
        -------
        standardized : pandas.DataFrame
            Data rescaled to the range 0 to 1 across the specified axis (the
            minimum is subtracted, then the result is divided by the range).
>>> import numpy as np
>>> d = np.arange(5, 8, 0.5)
>>> ClusterGrid.standard_scale(d)
array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
def dim_ratios(self, side_colors, axis, figsize, side_colors_ratio=0.05):
"""Get the proportions of the figure taken up by each axes
"""
figdim = figsize[axis]
# Get resizing proportion of this figure for the dendrogram and
# colorbar, so only the heatmap gets bigger but the dendrogram stays
# the same size.
dendrogram = min(2. / figdim, .2)
# add the colorbar
colorbar_width = .8 * dendrogram
colorbar_height = .2 * dendrogram
if axis == 0:
ratios = [colorbar_width, colorbar_height]
else:
ratios = [colorbar_height, colorbar_width]
if side_colors is not None:
# Add room for the colors
ratios += [side_colors_ratio]
# Add the ratio for the heatmap itself
ratios += [.8]
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
        The returned matrix and colormap can then be plotted with
        ``heatmap(matrix, cmap=cmap)`` so that the provided colors are shown.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
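        A small sketch of the expected behaviour (the integer codes in
        ``matrix`` depend on set ordering, so only its shape is shown):
        >>> matrix, cmap = ClusterGrid.color_list_to_matrix_and_cmap(
        ...     ['r', 'g', 'b'], ind=[2, 0, 1], axis=1)
        >>> matrix.shape
        (1, 3)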
"""
# check for nested lists/color palettes.
# Will fail if matplotlib color is list not tuple
if any(issubclass(type(x), list) for x in colors):
all_colors = set(itertools.chain(*colors))
n = len(colors)
m = len(colors[0])
else:
all_colors = set(colors)
n = 1
m = len(colors)
colors = [colors]
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
matrix = np.array([color_to_value[c]
for color in colors for c in color])
shape = (n, m)
matrix = matrix.reshape(shape)
matrix = matrix[:, ind]
if axis == 0:
# row-side:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(all_colors)
return matrix, cmap
def savefig(self, *args, **kwargs):
if 'bbox_inches' not in kwargs:
kwargs['bbox_inches'] = 'tight'
self.fig.savefig(*args, **kwargs)
def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
row_linkage, col_linkage):
# Plot the row dendrogram
if row_cluster:
self.dendrogram_row = dendrogram(
self.data2d, metric=metric, method=method, label=False, axis=0,
ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage)
else:
self.ax_row_dendrogram.set_xticks([])
self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
if col_cluster:
self.dendrogram_col = dendrogram(
self.data2d, metric=metric, method=method, label=False,
axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage)
else:
self.ax_col_dendrogram.set_xticks([])
self.ax_col_dendrogram.set_yticks([])
despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
def plot_colors(self, xind, yind, **kws):
"""Plots color labels between the dendrogram and the heatmap
Parameters
----------
        xind : list of ints
            Ordering of the columns, as returned by the column dendrogram
        yind : list of ints
            Ordering of the rows, as returned by the row dendrogram
        kws : dict
            Keyword arguments passed on to ``heatmap`` for the color labels
"""
# Remove any custom colormap and centering
kws = kws.copy()
kws.pop('cmap', None)
kws.pop('center', None)
kws.pop('vmin', None)
kws.pop('vmax', None)
kws.pop('xticklabels', None)
kws.pop('yticklabels', None)
if self.row_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.row_colors, yind, axis=0)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(self.ax_row_colors, left=True, bottom=True)
if self.col_colors is not None:
matrix, cmap = self.color_list_to_matrix_and_cmap(
self.col_colors, xind, axis=1)
heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
xticklabels=False, yticklabels=False,
**kws)
else:
despine(self.ax_col_colors, left=True, bottom=True)
def plot_matrix(self, colorbar_kws, xind, yind, **kws):
self.data2d = self.data2d.iloc[yind, xind]
self.mask = self.mask.iloc[yind, xind]
# Try to reorganize specified tick labels, if provided
xtl = kws.pop("xticklabels", True)
try:
xtl = np.asarray(xtl)[xind]
except (TypeError, IndexError):
pass
ytl = kws.pop("yticklabels", True)
try:
ytl = np.asarray(ytl)[yind]
except (TypeError, IndexError):
pass
heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.cax,
cbar_kws=colorbar_kws, mask=self.mask,
xticklabels=xtl, yticklabels=ytl, **kws)
self.ax_heatmap.yaxis.set_ticks_position('right')
self.ax_heatmap.yaxis.set_label_position('right')
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, **kws):
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage)
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.plot_colors(xind, yind, **kws)
self.plot_matrix(colorbar_kws, xind, yind, **kws)
return self
def clustermap(data, pivot_kws=None, method='average', metric='euclidean',
z_score=None, standard_scale=None, figsize=None, cbar_kws=None,
row_cluster=True, col_cluster=True,
row_linkage=None, col_linkage=None,
row_colors=None, col_colors=None, mask=None, **kwargs):
"""Plot a hierarchically clustered heatmap of a pandas DataFrame
Parameters
----------
    data : pandas.DataFrame
Rectangular data for clustering. Cannot contain NAs.
pivot_kws : dict, optional
If `data` is a tidy dataframe, can provide keyword arguments for
pivot to create a rectangular dataframe.
method : str, optional
Linkage method to use for calculating clusters.
See scipy.cluster.hierarchy.linkage documentation for more information:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
metric : str, optional
Distance metric to use for the data. See
scipy.spatial.distance.pdist documentation for more options
http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
z_score : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores
for the rows or the columns. Z scores are: z = (x - mean)/std, so
values in each row (column) will get the mean of the row (column)
subtracted, then divided by the standard deviation of the row (column).
This ensures that each row (column) has mean of 0 and variance of 1.
standard_scale : int or None, optional
Either 0 (rows) or 1 (columns). Whether or not to standardize that
dimension, meaning for each row or column, subtract the minimum and
divide each by its maximum.
    figsize : tuple of two ints, optional
Size of the figure to create.
cbar_kws : dict, optional
Keyword arguments to pass to ``cbar_kws`` in ``heatmap``, e.g. to
add a label to the colorbar.
{row,col}_cluster : bool, optional
If True, cluster the {rows, columns}.
{row,col}_linkage : numpy.array, optional
Precomputed linkage matrix for the rows or columns. See
scipy.cluster.hierarchy.linkage for specific formats.
{row,col}_colors : list-like, optional
List of colors to label for either the rows or columns. Useful to
evaluate whether samples within a group are clustered together. Can
use nested lists for multiple color levels of labeling.
mask : boolean array or DataFrame, optional
If passed, data will not be shown in cells where ``mask`` is True.
Cells with missing values are automatically masked. Only used for
visualizing, not for calculating.
kwargs : other keyword arguments
All other keyword arguments are passed to ``sns.heatmap``
Returns
-------
clustergrid : ClusterGrid
A ClusterGrid instance.
Notes
-----
The returned object has a ``savefig`` method that should be used if you
want to save the figure object without clipping the dendrograms.
To access the reordered row indices, use:
``clustergrid.dendrogram_row.reordered_ind``
Column indices, use:
``clustergrid.dendrogram_col.reordered_ind``
Examples
--------
Plot a clustered heatmap:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> flights = sns.load_dataset("flights")
>>> flights = flights.pivot("month", "year", "passengers")
>>> g = sns.clustermap(flights)
Don't cluster one of the axes:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, col_cluster=False)
Use a different colormap and add lines to separate the cells:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
>>> g = sns.clustermap(flights, cmap=cmap, linewidths=.5)
Use a different figure size:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, cmap=cmap, figsize=(7, 5))
Standardize the data across the columns:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, standard_scale=1)
Normalize the data across the rows:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, z_score=0)
Use a different clustering method:
.. plot::
:context: close-figs
>>> g = sns.clustermap(flights, method="single", metric="cosine")
Add colored labels on one of the axes:
.. plot::
:context: close-figs
>>> season_colors = (sns.color_palette("BuPu", 3) +
... sns.color_palette("RdPu", 3) +
... sns.color_palette("YlGn", 3) +
... sns.color_palette("OrRd", 3))
>>> g = sns.clustermap(flights, row_colors=season_colors)
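    Access the reordered indices and save the figure without clipping the
    dendrograms (a sketch; the output filename is a placeholder):
    .. plot::
        :context: close-figs
        >>> g = sns.clustermap(flights)
        >>> row_order = g.dendrogram_row.reordered_ind
        >>> col_order = g.dendrogram_col.reordered_ind
        >>> g.savefig("clustermap.png")  # doctest: +SKIP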
"""
plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,
row_colors=row_colors, col_colors=col_colors,
z_score=z_score, standard_scale=standard_scale,
mask=mask)
return plotter.plot(metric=metric, method=method,
colorbar_kws=cbar_kws,
row_cluster=row_cluster, col_cluster=col_cluster,
row_linkage=row_linkage, col_linkage=col_linkage,
**kwargs)
| bsd-3-clause |
dlu-ch/dlb | test/benchmark/plot_result.py | 1 | 7183 | # Plot data collected by build-with-dlb-and-scons-and-make.bash.
# Run in the directory of the script.
#
# Input: build/result.txt
# Output: build/*.svg
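# The layout of result.txt, reconstructed from the parsing below (the values
# shown are hypothetical, not measured data):
#   # dlb version: 0.5.0.
#   # GNU Make 4.2.1 Built for x86_64-pc-linux-gnu
#   # SCons by Steven Knight et al. v3.0.1 ...
#   dlb 3 100 12.3 11.8 0.9 0.7 0.6
#   scons 3 100 20.1 19.5 2.2 1.9 1.8
# Data lines: tool name, number of libraries, number of classes per library,
# then up to five durations in seconds (the plotting below uses the first two
# as full-build durations and the fifth as the partial-build duration);
# a line without durations marks a failed run.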
import re
import os.path
import matplotlib.pyplot as plt
import matplotlib.pylab
build_dir_path = os.path.join('..', '..', 'build', 'out', 'benchmark')
result_file_path = os.path.join(build_dir_path, 'result.txt')
matplotlib.pylab.rcParams.update({
'figure.titlesize': 'medium',
'axes.titlesize': 'medium'
})
def safe_fig(fig, name):
fig.set_size_inches(*[m / 24.3e-3 for m in [200e-3, 150e-3]])
fig.savefig(os.path.join(build_dir_path, f'benchmark-{name}.svg'), format='svg', transparent=True)
description_by_tool = {}
durations_by_configuration = {}
with open(result_file_path, 'r') as result_file:
for line in result_file:
line = line.strip()
if line[:1] == '#':
m = re.fullmatch(r'# dlb version: ([0-9a-z.+]+)\.', line)
if m:
description_by_tool['dlb'] = f'dlb {m.group(1)}'
continue
m = re.fullmatch(r'# GNU Make ([0-9a-z.]+).*', line)
if m:
description_by_tool['make'] = f'GNU Make {m.group(1)}'
continue
m = re.fullmatch(r'# SCons .* v([0-9][0-9a-z.]+).*', line)
if m:
v = m.group(1)
if len(v) > 10:
v = f'{v[:15]}...'
description_by_tool['scons'] = f'SCons {v}'
continue
raise ValueError(f'unexpected comment line: {line!r}')
fields = line.split(' ')
tool_name, number_of_libraries, number_of_classes_per_library = fields[:3]
configuration = tool_name, int(number_of_libraries), int(number_of_classes_per_library)
if len(fields) > 3:
t0, t1, t2, tpartial0, tpartial1 = [float(f) for f in fields[3:][:5]]
durations_by_configuration[configuration] = t0, t1, t2, tpartial0, tpartial1
else:
durations_by_configuration[configuration] = None # failed
description_by_tool['make2'] = description_by_tool['make']
description_by_tool['make'] = '{}\n+ makedepend (simplistic)'.format(description_by_tool['make'])
description_by_tool['dlb2'] = '{}\n(grouped)'.format(description_by_tool['dlb'])
description_by_tool['dlb3'] = '{}\n(hierarchical)'.format(description_by_tool['dlb'])
tools = ['make', 'make2', 'dlb', 'dlb2', 'dlb3', 'scons'] # as used in file *result_file_path*
colormap = plt.get_cmap("tab10")
style_by_tool = {
'make': (colormap(2), '-', 'x', 'none'),
'make2': (colormap(2), '-', 'v', 'full'),
'dlb': (colormap(0), '-', 'o', 'full'),
'dlb2': (colormap(0), '-', 'o', 'none'),
'dlb3': (colormap(0), '-', 'o', 'left'),
'scons': (colormap(1), '-', 's', 'full')
}
# vary number_of_classes_per_library
fig, axs = plt.subplots(2, 2, gridspec_kw={'hspace': 0})
number_of_libraries = 3
ncls = set(ncls for (t, nlib, ncls), v in durations_by_configuration.items())
fig.suptitle(f'{number_of_libraries} static libraries with {min(ncls)} to {max(ncls)} C++ source files each')
for tool in tools:
line_color, line_style, marker, marker_fillstyle = style_by_tool[tool]
# full build (maximum of first two runs)
x, y = zip(*[
(ncls, max(v[0], v[1]))
for (t, nlib, ncls), v in durations_by_configuration.items()
if v and nlib == number_of_libraries and t == tool
])
axs[0][0].plot(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[0][0].grid()
axs[0][0].set_xticklabels([])
axs[1][0].semilogy(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[1][0].grid(which='both')
axs[0][0].set_title('full build\n(each source file compiled, linked)')
axs[1][0].set_xlabel('number of source files per library')
axs[1][0].set_ylabel('duration (s)')
# partial build, vary number_of_classes_per_library
x, y = zip(*[
(ncls, v[4])
for (t, nlib, ncls), v in durations_by_configuration.items()
if v and nlib == 3 and t == tool
])
axs[0][1].plot(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[0][1].grid()
axs[0][1].set_xticklabels([])
axs[0][1].yaxis.tick_right()
axs[1][1].semilogy(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[1][1].grid(which='both')
axs[0][1].set_title(f'partial build\n(after one source file has been changed)')
axs[1][1].set_xlabel('number of source files per library')
axs[1][1].set_ylabel('duration (s)')
axs[1][1].yaxis.tick_right()
axs[1][1].yaxis.set_label_position("right")
axs[0][1].legend([description_by_tool[t] for t in tools], fancybox=True, framealpha=0.5)
safe_fig(fig, '1')
# vary number_of_libraries
fig, axs = plt.subplots(2, 2, gridspec_kw={'hspace': 0})
number_of_classes_per_library = 100
nlib = set(nlib for (t, nlib, ncls), v in durations_by_configuration.items())
fig.suptitle(f'{min(nlib)} to {max(nlib)} static libraries with {number_of_classes_per_library} C++ source files each')
for tool in tools:  # iterate in the same order as the legend labels below
line_color, line_style, marker, marker_fillstyle = style_by_tool[tool]
# full build (first run)
x, y = zip(*[
(nlib, v[0])
for (t, nlib, ncls), v in durations_by_configuration.items()
if v and ncls == number_of_classes_per_library and t == tool
])
axs[0][0].plot(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[0][0].grid()
axs[0][0].set_xticklabels([])
axs[1][0].semilogy(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[1][0].grid(which='both')
axs[0][0].set_title('full build\n(each source file compiled, linked)')
axs[1][0].set_xlabel('number of libraries')
axs[1][0].set_ylabel('duration (s)')
    # partial build, vary number_of_libraries
x, y = zip(*[
(nlib, v[4])
for (t, nlib, ncls), v in durations_by_configuration.items()
if v and ncls == number_of_classes_per_library and t == tool
])
axs[0][1].plot(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[0][1].grid()
axs[0][1].set_xticklabels([])
axs[0][1].yaxis.tick_right()
axs[1][1].semilogy(x, y, label=tool, color=line_color, linestyle=line_style,
marker=marker, fillstyle=marker_fillstyle)
axs[1][1].grid(which='both')
axs[0][1].set_title('partial build\n(after one source file has been changed)')
axs[1][1].set_xlabel('number of libraries')
axs[1][1].set_ylabel('duration (s)')
axs[1][1].yaxis.tick_right()
axs[1][1].yaxis.set_label_position("right")
axs[0][1].legend([description_by_tool[t] for t in tools], fancybox=True, framealpha=0.5)
safe_fig(fig, '2')
| gpl-3.0 |
euclides5414/ml_utils | ml_utils/ensemble/classes.py | 1 | 6134 | from __future__ import division, print_function
__author__ = 'Euclides Fernandes Filho <[email protected]>'
"""
ml_utils
Copyright (C) 2015 Euclides Fernandes Filho <[email protected]>
http://www.gnu.org/licenses/gpl-2.0.html#SEC4
"""
from ml_utils.utils.logger import get_loggers
logi, logd, logw, loge = get_loggers(__name__)
import pandas as pd
import numpy as np
import sklearn
import sklearn.base
from sklearn.metrics import make_scorer
from sklearn.preprocessing import StandardScaler
import ml_utils.utils as u
from ml_utils.cv import KFoldPred, KStratifiedPred
from ml_utils.utils import stringify2
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
SEED = 17
np.random.seed(SEED)
class KFoldEnsemble:
def __init__(self, x, y, models, ensemble_model, scoring=None, n_folds=3, random_state=SEED,
shuffle=False, n_jobs=-1, stratified=False, preprocessor=None, verbose=0,
ensemble_grid_params=None, score_greater_is_better=False):
assert isinstance(models, (list, tuple, set)), type(models)
assert isinstance(ensemble_model, sklearn.base.BaseEstimator), \
"%s != %s" % (type(ensemble_model), type(sklearn.base.BaseEstimator))
self.X = x
self.y = y
self.ensemble_model = ensemble_model
self.n_folds = n_folds
self.shuffle = shuffle
self.models = models
self.stratified = stratified
self.random_state = random_state
self.predictors_n_jobs = n_jobs
self.scoring = scoring
self.preprocessor = preprocessor
self.verbose = verbose
self.ensemble_scaler = None
self.score_greater_is_better = score_greater_is_better
self.base_predictor = KStratifiedPred if self.stratified else KFoldPred
# TODO n_jobs split ensemble and CV
self.true_col = "TRUE"
        self.cols = ["%s" % stringify2(model, i) for i, model in enumerate(models)]
self.predictions_cv = None
self.predictions = None
self.ensemble_grid_params = ensemble_grid_params
self.predictors = {}
def fit(self, external_cols=None):
self.predictors = {}
self.predictions_cv = pd.DataFrame()
for i_model, model in enumerate(self.models):
model_name = stringify2(model, i_model)
if self.verbose:
logd(model_name)
t0 = u.t()
i_predictor = self.base_predictor(self.X, self.y, model, scoring=self.scoring,
n_folds=self.n_folds, random_state=self.random_state,
shuffle=self.shuffle, n_jobs=self.predictors_n_jobs,
preprocessor=self.preprocessor, verbose=self.verbose)
col = model_name
i_predictor.fit()
i_prediction_cv = i_predictor.predict()
if not len(self.predictions_cv):
self.predictions_cv = i_prediction_cv.rename(columns={i_predictor.cv_col: col}) # [i_predictor.cv_col]
else:
df = i_prediction_cv[[i_predictor.cv_col]].rename(columns={i_predictor.cv_col: col})
# TODO assert index is not duplicate
self.predictions_cv = self.predictions_cv.merge(df, left_index=True, right_index=True)
i_predictor.fit_test()
self.predictors[model_name] = i_predictor
if self.verbose:
logd("Fit %s in %2.2f seconds" % (model_name, u.td(t0)))
self.fit_ensemble(external_cols=external_cols)
def fit_ensemble(self, external_cols=None):
t0 = u.t()
_x = self.predictions_cv[self.cols] if self.predictions_cv is not None else pd.DataFrame()
if external_cols is not None:
if not isinstance(external_cols, pd.DataFrame):
external_cols = pd.DataFrame(external_cols)
for col in external_cols.columns:
_x["ADD_%s" % col] = external_cols[col]
_y = self.predictions_cv[self.true_col]
self.ensemble_scaler = StandardScaler()
x = self.ensemble_scaler.fit_transform(_x)
if self.ensemble_grid_params:
scorer = make_scorer(self.scoring, greater_is_better=self.score_greater_is_better)
self.ensemble_model, _ = \
u.get_best_model(self.ensemble_model, self.ensemble_grid_params, x, _y,
scoring=scorer, cv=self.n_folds, refit=True)
else:
self.ensemble_model.fit(x, _y)
if self.verbose:
logd("Fit Ensemble in %2.2f seconds" % u.td(t0))
self.predictions_cv["ENS"] = self.ensemble_model.predict(x)
self.predictions_cv = self.predictions_cv[self.cols + ["ENS", self.true_col]]
def predict(self, x, external_cols=None):
if not isinstance(x, pd.DataFrame):
x = pd.DataFrame(x)
self.predictions = pd.DataFrame(columns=self.cols, index=x.index) \
if self.predictions_cv is not None else pd.DataFrame()
for model_name, i_predictor in self.predictors.iteritems():
i_prediction = i_predictor.predict_test(x)
self.predictions[model_name] = i_prediction
if external_cols is not None:
if not isinstance(external_cols, pd.DataFrame):
external_cols = pd.DataFrame(external_cols)
for col in external_cols.columns:
self.predictions["ADD_%s" % col] = external_cols[col]
return self.ensemble_model.predict(self.ensemble_scaler.transform(self.predictions))
def pickle(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def from_pickle(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def score_ensemble(self):
if self.scoring:
df = self.predictions_cv
for col in df.columns:
if col == "TRUE":
continue
logd(col, self.scoring(df[col], df["TRUE"]))
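# A hedged usage sketch of KFoldEnsemble (the estimators, data names and the
# scorer below are placeholders, not part of this module):
#
#     from sklearn.linear_model import Ridge
#     from sklearn.ensemble import RandomForestRegressor
#     from sklearn.metrics import mean_squared_error
#
#     ens = KFoldEnsemble(x_train, y_train,
#                         models=[Ridge(), RandomForestRegressor()],
#                         ensemble_model=Ridge(),
#                         scoring=mean_squared_error, n_folds=5)
#     ens.fit()
#     y_hat = ens.predict(x_test)
#     ens.score_ensemble()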
| gpl-2.0 |
craigmbooth/instagram-map | src/instagram_map_visualize.py | 1 | 9879 | import os
import argparse
import pymongo
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as patches
import datetime
import logging
from configs import (cameras, ValidRegions)
from extract_data import (add_timezone_info, read_results_from_mongo)
from utils import TimedLogger
def fix_opacity_and_color_map(x, opacity_thresh=0.1, max_opacity=0.8):
""""Given a 2d image, x, get rgbt values for each pixel and modify
opacity values"""
tmp = cm.hot(x)
for i in xrange(tmp.shape[0]):
for j in xrange(tmp.shape[1]):
            # Brighten the RGB channels of the colormap output (the alpha
            # channel is handled separately below)
            tmp[i,j][0] *= 2
            tmp[i,j][1] *= 2
            tmp[i,j][2] *= 2
if x[i,j] > opacity_thresh:
tmp[i,j][3] = max_opacity
else:
tmp[i,j][3] = max_opacity * x[i,j] / opacity_thresh
return tmp
def add_time_labels(fig, target_time, background_color="#FFFFFF"):
"""Adds time labels to the plot"""
#Axes with range [0,1] to allow for easy absolute positioning of text
ax = fig.add_axes([0,0,1,1])
# axes coordinates are 0,0 is bottom left and 1,1 is upper right
p = patches.Rectangle((0, 0), 1, 1, fill=True, transform=ax.transAxes,
clip_on=False, color=background_color)
ax.add_patch(p)
font_kwargs = {"color": "white", "fontsize": 40, "transform": ax.transAxes}
gmt_time = target_time - datetime.timedelta(hours=0)
ax.text(0.5, 0.02, gmt_time.strftime("%I%p GMT"),
horizontalalignment='center',
verticalalignment='bottom',
**font_kwargs)
eastern_time = target_time - datetime.timedelta(hours=4)
ax.text(0.02, 0.02, eastern_time.strftime("%I%p EST"),
horizontalalignment='left',
verticalalignment='bottom',
**font_kwargs)
aest_time = target_time + datetime.timedelta(hours=10)
ax.text(0.98, 0.02, aest_time.strftime("%I%p AEST"),
horizontalalignment='right',
verticalalignment='bottom',
**font_kwargs)
ax.set_axis_off()
def make_single_map(target_time, camera, lons, lats, weights, gauss_sigma=1,
sea_color="#111111", land_color="#888888", nheatmapbins=500,
file_prefix="USA", opacity_thresh=0.1, max_opacity=0.8,
calc_norm_map=False, norm_map=None, do_map_normalization=False):
"""Makes a single image"""
fig = plt.gcf()
fig = plt.figure()
fig.set_size_inches(16,9)
plt.clf()
add_time_labels(fig, target_time, background_color=sea_color)
m = Basemap(
projection=camera["projection"],
resolution=camera["resolution"],
**camera["projection_kwargs"])
m.drawcoastlines(color=sea_color)
m.drawcountries(color=sea_color)
m.drawstates(color=sea_color)
m.drawmapboundary(fill_color=sea_color)
m.fillcontinents(color=land_color,lake_color=sea_color)
xpoints,ypoints = m(lons,lats)
im, xedges, yedges = np.histogram2d(xpoints, ypoints,
range=((m.llcrnrx, m.urcrnrx),
(m.llcrnry, m.urcrnry)),
bins=(nheatmapbins, (9./16.)*nheatmapbins),
weights=weights)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
im = np.log(np.rot90(im)+1)
if calc_norm_map:
return im
if do_map_normalization:
im = np.divide(im, norm_map)
im[np.isnan(im)] = 0.0
if not calc_norm_map:
im = gaussian_filter(im, gauss_sigma)
plt.imshow(fix_opacity_and_color_map(im, max_opacity=max_opacity,
opacity_thresh=opacity_thresh),
extent=extent, zorder=10)
logging.getLogger().info("Saving file : "+file_prefix+".png")
plt.savefig(file_prefix+".png")
def calculate_point_weights(points, time, decay_hours=1):
"""For each of the lat and lng points calculate a weight.
Point weights should drop off as the associated time falls further into
the past"""
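    # Weight formula used below: w = exp(-dt_minutes / 60 / decay_hours), so
    # with decay_hours=1 a photo taken 60 minutes before the frame time gets
    # weight exp(-1) ~= 0.37 and one taken 3 hours earlier exp(-3) ~= 0.05.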
lat = []
lon = []
weights = []
for res in points:
image_time = res["created_time"]
#Time to the nearest minute from the photo being taken to now:
#+ve times mean that the photo was taken earlier than now.
dt_in_minutes = (60*(time.hour - image_time.hour) +
(time.minute-image_time.minute))
if dt_in_minutes < 0:
dt_in_minutes = dt_in_minutes + 24*60
weight = np.exp(-float(dt_in_minutes)/60/decay_hours)
lat.append(res["latitude"])
lon.append(res["longitude"])
weights.append(weight)
return lat, lon, weights
def make_map_sequence(args, full_results, calc_norm_map=False,
do_map_normalization=False, aggregate_norm_frame=None):
"""Generate heatmap frames and do one of two things, if ``calc_norm_map`` is
False then save an image of the heatmap overlaid on a Basemap. If
``calc_norm_map`` is True then calculate the heatmap and return it for use
in an integrated normalization map.
args: argparse parsed arguments for this program
full_results: Full dump of the MongoDB results
"""
timedelta = datetime.timedelta(minutes=args.minutes_step)
target_time = datetime.datetime(2014, 1, 1, 0,0,0)
while target_time.day == 1:
target_time += timedelta
lat, lon, weights = calculate_point_weights(full_results,
target_time, decay_hours=1)
file_prefix = os.path.join(args.data_dir,
args.region+target_time.strftime("%H%M"))
map_kwargs = {
"gauss_sigma": 1,
"file_prefix": file_prefix,
"opacity_thresh": opacity_thresh,
"max_opacity": max_opacity,
"calc_norm_map": calc_norm_map,
"do_map_normalization": do_map_normalization,
"norm_map": aggregate_norm_frame
}
with TimedLogger("Generating frame with prefix %s" % file_prefix,
logging.getLogger()):
norm_frame = make_single_map(target_time, cameras[args.region], lon,
lat, weights, **map_kwargs)
if calc_norm_map:
if aggregate_norm_frame is None:
aggregate_norm_frame = norm_frame
else:
aggregate_norm_frame = np.add(aggregate_norm_frame,
norm_frame)
return aggregate_norm_frame
if __name__ == "__main__":
"""If run directly from the command line, parse arguments and write
movie frames"""
parser = argparse.ArgumentParser(description="Code that reads points from "
"a MongoDB database, and makes movie frames showing how the points "
"are distributed on a map")
parser.add_argument('--minutes_step', type=int, default=60,
help='number of minutes to advance for each frame')
parser.add_argument('--region', type=ValidRegions, default="World",
help='Select which of the configs in config.py to use, by '
'dictionary key. Allowed values : \n%s' % cameras.keys())
parser.add_argument('--add_timezones', action="count",
help='If present then add timezone offsets to any DB '
'elements that are missing them')
parser.add_argument('--data_dir', type=str, default="data",
help='Path to store output images')
parser.add_argument('--logfile', type=str, default="instagram_map.log",
help='Name of logfile')
parser.add_argument('--normalize_map', action="count",
help='If present, then generate the map twice, and '
'use the maximum values for each pixel generated in the '
'normalized map to rescale the output on the second run '
'through')
args = parser.parse_args()
    logging.basicConfig(filename=os.path.join(args.data_dir, args.logfile),
                        level=logging.DEBUG,
                        format='%(asctime)s %(message)s')
    # Read optional values from the config:
max_opacity = cameras[args.region].get("max_opacity", 0.8)
opacity_thresh = cameras[args.region].get("opacity_thresh", 0.1)
logging.getLogger().info("Setting region=%s" % args.region)
logging.getLogger().info("Setting max_opacity=%f" % max_opacity)
logging.getLogger().info("Setting opacity_thresh=%f" % opacity_thresh)
if args.add_timezones:
with TimedLogger("Adding missing timezone info", logging.getLogger()):
add_timezone_info()
with TimedLogger("Reading full dataset from MongoDB", logging.getLogger()):
full_results = read_results_from_mongo()
    # If normalize_map is set then do a first run through and generate an
    # integrated map for normalization purposes
full_norm_frame = None
if args.normalize_map:
with TimedLogger("Making normalization map", logging.getLogger()):
normalization_map = make_map_sequence(args, full_results,
calc_norm_map=True,
aggregate_norm_frame=full_norm_frame)
# Write movie frames
with TimedLogger("Writing movie frames", logging.getLogger()):
if args.normalize_map:
make_map_sequence(args, full_results, do_map_normalization=True,
aggregate_norm_frame=normalization_map)
else:
make_map_sequence(args, full_results)
logging.getLogger().info("instagram_map_visualize is COMPLETE")
| gpl-2.0 |
phdowling/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
milankl/swm | calc/misc/tempautocorr_plot_bf.py | 1 | 3288 | ## TEMPAUTOCORR PLOT
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import cumtrapz
## read data
runpath1 = path+'data/run%04i' % 0
D1 = np.load(runpath1+'/analysis/acfs.npy').all()
param1 = np.load(runpath1+'/param.npy').all()
runpath2 = path+'data/run%04i' % 6
D2 = np.load(runpath2+'/analysis/acfs.npy').all()
param2 = np.load(runpath2+'/param.npy').all()
runpath3 = path+'data/run%04i' % 10
D3 = np.load(runpath3+'/analysis/acfs.npy').all()
param3 = np.load(runpath3+'/param.npy').all()
runpath4 = path+'data/run%04i' % 14
D4 = np.load(runpath4+'/analysis/acfs.npy').all()
param4 = np.load(runpath4+'/param.npy').all()
runpath5 = path+'data/run%04i' % 15
D5 = np.load(runpath5+'/analysis/acfs.npy').all()
param5 = np.load(runpath5+'/param.npy').all()
# read without bottom friction data
# runpath1 = path+'data/newold/run%04i' % 3
# D1 = np.load(runpath1+'/analysis/acfs.npy').all()
# param1 = np.load(runpath1+'/param.npy').all()
#
# runpath2 = path+'data/newold/run%04i' % 10
# D2 = np.load(runpath2+'/analysis/acfs.npy').all()
# param2 = np.load(runpath2+'/param.npy').all()
#
# runpath3 = path+'stoch/data/run%04i' % 13
# D3 = np.load(runpath3+'/analysis/acfs.npy').all()
# param3 = np.load(runpath3+'/param.npy').all()
#
# runpath4 = path+'stoch/data/run%04i' % 12
# D4 = np.load(runpath4+'/analysis/acfs.npy').all()
# param4 = np.load(runpath4+'/param.npy').all()
#
# runpath5 = path+'stoch/data/run%04i' % 14
# D5 = np.load(runpath5+'/analysis/acfs.npy').all()
# param5 = np.load(runpath5+'/param.npy').all()
## Plotting
fig,axs = plt.subplots(2,3,sharex=True,sharey=True,figsize=(9,6))
plt.tight_layout(rect=[0.05,0.02,1,0.96])
fig.subplots_adjust(wspace=0.05,hspace=0.26)
for i in range(3):
for j in range(2):
axs[j,i].plot(D1['time'],D1['acfs'][:,i,j],'C0',label=r'Low resolution, $\Delta x = $30km',lw=2)
axs[j,i].plot(D2['time'],D2['acfs'][:,i,j],'C2',label=r'High resolution, $\Delta x = $7.5km',lw=2)
axs[j,i].plot(D3['time'],D3['acfs'][:,i,j],'C3',label=r'LR + weak backscatter',lw=1,ls='--')
axs[j,i].plot(D4['time'],D4['acfs'][:,i,j],'C1',label=r'LR + moderate backscatter',lw=1,ls='--')
axs[j,i].plot(D5['time'],D5['acfs'][:,i,j],'C5',label=r'LR + strong backscatter',lw=1,ls='--')
axs[j,i].plot([0,50],[0,0],'C7',alpha=.5)
axs[0,0].set_xlim(0,49)
axs[0,0].set_ylim(-0.9,1)
axs[1,1].legend(bbox_to_anchor=(-1, 1.02, 3., .102), loc=3, fontsize=9, ncol=3, mode="expand", borderaxespad=0.)
axs[0,0].set_title(r'Zonal velocity $u$',loc='left')
axs[0,1].set_title(r'Meridional velocity $v$',loc='left')
axs[0,2].set_title(r'Surface displacement $\eta$',loc='left')
axs[1,0].set_xlabel('lag [days]')
axs[1,1].set_xlabel('lag [days]')
axs[1,2].set_xlabel('lag [days]')
axs[0,0].set_ylabel('Autocorrelation \n Point A')
axs[1,0].set_ylabel('Autocorrelation \n Point B')
axs[0,0].set_yticks([-0.5,0,0.5,1])
abc = 'abcdef'
abci = 0
for axcol in axs:
for ax in axcol:
plt.text(0.93,0.93,abc[abci],transform=ax.transAxes,fontweight='bold')
abci += 1
plt.savefig(path+'compare/autocorrelation_bf.pdf')
plt.close(fig)
#plt.show() | gpl-3.0 |