repo_name (stringlengths, 6-96) | path (stringlengths, 4-191) | copies (stringclasses, 322 values) | size (stringlengths, 4-6) | content (stringlengths, 762-753k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
pascalgutjahr/Praktikum-1 | Schwingung/phaselinear.py | 1 | 1829 | import numpy as np
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.optimize import curve_fit
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
plt.rcParams['lines.linewidth'] = 1
csfont = {'fontname': 'Times New Roman'}
# linear plot
fre, t = np.genfromtxt('tables/phase.txt', unpack=True, skip_header=2)
fre *= 1000
t /= 1e6
phirad = 2 * np.pi * fre * t
# theory curve
L = 3.53 * (10**-3)
C = 5.015 * (10**-9)
w = fre * 2 * np.pi
R = 271.6
# fre = np.linspace(np.log(15),np.log(20))
phi = np.arctan((w * R * C)/(1 - (L * C * (w**2))))
# plot up to the resonance
#plt.plot(fre/1000, phi, 'b-', label='Theoriekurve')
# plt.plot(fre/1000, -phi, 'b-', label='Theoriekurve')
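# The theory curve is split into two branches at the resonance: above resonance the
# denominator 1 - L*C*w**2 changes sign and arctan jumps by pi, so the second branch
# below is shifted by +pi to keep the plotted phase continuous.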
fre_theo = np.linspace(15000, 37500, 100)
phi_theo = np.arctan((2*np.pi*fre_theo * R * C)/(1 - (L * C * ((2*np.pi*fre_theo)**2))))
fre_theo2 = np.linspace(38000, 55000, 100)
phi_theo2 = np.arctan((2*np.pi*fre_theo2 * R * C)/(1 - (L * C * ((2*np.pi*fre_theo2)**2))))+np.pi
plt.plot(fre_theo/1000, phi_theo, 'b-', label='Theoriekurve')
plt.plot(fre_theo2/1000, phi_theo2, 'b-')
plt.plot(fre/1000, phirad, 'rx', label='Messwerte')
plt.plot((32.196, 32.196), (0.5, 2.5), 'g--', label='untere/obere Grenzfrequenz')
plt.plot((44.442, 44.442), (0.5, 2.5), 'g--')
plt.plot((36.822, 36.822), (0.5, 2.5), 'k--', label='Resonanzfrequenz')
# plt.xlim(30, 45)
plt.yticks(np.arange(0, np.pi, np.pi/4), [r'$0$', r'$\mathrm{\pi}/4$', r'$\mathrm{\pi}/2$', r'$3\mathrm{\pi}/4$'])
# plt.ylim(min(phirad)-5, max(phirad)+5)
plt.xlabel(r'$\mathrm{\nu} \,/\, \mathrm{kHz}$')
plt.ylabel(r'$\mathrm{\varphi}$')
plt.legend(loc='lower right')
plt.grid()
plt.tight_layout()
plt.savefig('Bilder/phaselinear.pdf')
plt.show()
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tseries/tdi.py | 7 | 32620 | """ implement the TimedeltaIndex """
from datetime import timedelta
import numpy as np
from pandas.types.common import (_TD_DTYPE,
is_integer, is_float,
is_bool_dtype,
is_list_like,
is_scalar,
is_integer_dtype,
is_object_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
_ensure_int64)
from pandas.types.missing import isnull
from pandas.types.generic import ABCSeries
from pandas.core.common import _maybe_box, _values_from_object
from pandas.core.index import Index, Int64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import to_offset
from pandas.core.base import _shared_docs
from pandas.core.nanops import _checked_add_with_arr
from pandas.indexes.base import _index_shared_docs
import pandas.core.common as com
import pandas.types.concat as _concat
from pandas.util.decorators import Appender, Substitution
from pandas.tseries.base import TimelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.timedeltas import (to_timedelta,
_coerce_scalar_to_timedelta_type)
from pandas.tseries.offsets import Tick, DateOffset
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._join as _join
import pandas.index as _index
Timedelta = tslib.Timedelta
def _td_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
def wrapper(self, other):
msg = "cannot compare a TimedeltaIndex with type {0}"
func = getattr(super(TimedeltaIndex, self), opname)
if _is_convertible_to_td(other) or other is tslib.NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
raise TypeError(msg.format(type(other)))
result = func(other)
if isnull(other):
result.fill(nat_result)
else:
if not is_list_like(other):
raise TypeError(msg.format(type(other)))
other = TimedeltaIndex(other).values
result = func(other)
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == tslib.iNaT
else:
o_mask = other.view('i8') == tslib.iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit : unit of the data (D, h, m, s, ms, us, ns), optional
Used when `data` is an integer or float number
freq: a frequency for the index, optional
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(
joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(_join.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_join.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_join.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_join.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
_datetimelike_ops = ['days', 'seconds', 'microseconds', 'nanoseconds',
'freq', 'components']
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
__lt__ = _td_index_cmp('__lt__')
__gt__ = _td_index_cmp('__gt__')
__le__ = _td_index_cmp('__le__')
__ge__ = _td_index_cmp('__ge__')
_engine_type = _index.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
freq = None
def __new__(cls, data=None, unit=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None,
closed=None, verify_integrity=True, **kwargs):
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# convert if not already
if getattr(data, 'dtype', None) != _TD_DTYPE:
data = to_timedelta(data, unit=unit, box=False)
elif copy:
data = np.array(data, copy=True)
# check that we are matching freqs
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(
index[0], None, len(index), name, freq)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'timedeltas does not conform to '
'passed frequency {1}'
.format(inferred, freq.freqstr))
index.freq = freq
return index
if freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred:
index.freq = to_offset(inferred)
return index
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
index = _generate_regular_range(start, end, periods, offset)
index = cls._simple_new(index, name=name, freq=offset)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not getattr(values, 'dtype', None):
values = np.array(values, copy=False)
if values.dtype == np.object_:
values = tslib.array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = _ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
@property
def _formatter_func(self):
from pandas.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
name = self.name
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is index
name = com._maybe_match_name(self, delta)
else:
raise ValueError("cannot add the type {0} to a TimedeltaIndex"
.format(type(delta)))
result = TimedeltaIndex(new_values, freq='infer', name=name)
return result
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
if opstr in ['__div__', '__truediv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isnull(other):
raise NotImplementedError(
"division by pd.NaT not implemented")
i8 = self.asi8
result = i8 / float(other.value)
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name, copy=False)
return NotImplemented
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is tslib.NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = _checked_add_with_arr(i8, other.value)
result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
def _sub_datelike(self, other):
from pandas import DatetimeIndex
if other is tslib.NaT:
result = self._nat_new(box=False)
else:
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
return DatetimeIndex(result, name=self.name, copy=False)
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
def _get_field(self, m):
values = self.asi8
hasnans = self.hasnans
if hasnans:
result = np.empty(len(self), dtype='float64')
mask = self._isnan
imask = ~mask
result.flat[imask] = np.array(
[getattr(Timedelta(val), m) for val in values[imask]])
result[mask] = np.nan
else:
result = np.array([getattr(Timedelta(val), m)
for val in values], dtype='int64')
return result
@property
def days(self):
""" Number of days for each element. """
return self._get_field('days')
@property
def seconds(self):
""" Number of seconds (>= 0 and less than 1 day) for each element. """
return self._get_field('seconds')
@property
def microseconds(self):
"""
Number of microseconds (>= 0 and less than 1 second) for each
element. """
return self._get_field('microseconds')
@property
def nanoseconds(self):
"""
Number of nanoseconds (>= 0 and less than 1 microsecond) for each
element.
"""
return self._get_field('nanoseconds')
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
if isnull(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self])
result.columns = columns
if not hasnans:
result = result.astype('int64')
return result
def total_seconds(self):
"""
Total duration of each element expressed in seconds.
.. versionadded:: 0.17.0
"""
return self._maybe_mask_results(1e-9 * self.asi8)
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
Returns
-------
timedeltas : ndarray
"""
return tslib.ints_to_pytimedelta(self.asi8)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_timedelta64_ns_dtype(dtype):
if copy is True:
return self.copy()
return self
elif is_timedelta64_dtype(dtype):
# return an index (essentially this is division)
result = self.values.astype(dtype, copy=copy)
if self.hasnans:
return Index(self._maybe_mask_results(result,
convert='float64'),
name=self.name)
return Index(result.astype('i8'), name=self.name)
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), dtype='i8',
name=self.name)
raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
def union(self, other):
"""
Specialized union for TimedeltaIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _possibly_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if isnull(key):
key = tslib.NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ['ix', 'loc', 'getitem', None]
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + to_offset(parsed.resolution) -
Timedelta(1, 'ns'))
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
if is_integer(key) or is_float(key) or key is tslib.NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
raise NotImplementedError
# TODO(wesm): dead code
# parsed = _coerce_scalar_to_timedelta_type(key, box=True)
# is_monotonic = self.is_monotonic
# # figure out the resolution of the passed td
# # and round to it
# # t1 = parsed.round(reso)
# t2 = t1 + to_offset(parsed.resolution) - Timedelta(1, 'ns')
# stamps = self.asi8
# if is_monotonic:
# # we are out of range
# if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
# t2.value < stamps[0]) or
# ((use_rhs and t1.value > stamps[-1] and
# t2.value > stamps[-1])))):
# raise KeyError
# # a monotonic (sorted) series can be sliced
# left = (stamps.searchsorted(t1.value, side='left')
# if use_lhs else None)
# right = (stamps.searchsorted(t2.value, side='right')
# if use_rhs else None)
# return slice(left, right)
# lhs_mask = (stamps >= t1.value) if use_lhs else True
# rhs_mask = (stamps <= t2.value) if use_rhs else True
# # try to find a the dates
# return (lhs_mask & rhs_mask).nonzero()[0]
@Substitution(klass='TimedeltaIndex', value='key')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, key, side='left', sorter=None):
if isinstance(key, (np.ndarray, Index)):
key = np.array(key, dtype=_TD_DTYPE, copy=False)
else:
key = _to_m8(key)
return self.values.searchsorted(key, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def dtype(self):
return _TD_DTYPE
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python datetime or a numpy integer-like, returned
Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except:
pass
freq = None
if isinstance(item, (Timedelta, tslib.NaTType)):
# check freq can be preserved on edge cases
if self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
"""
return a boolean indicating whether `other` can be converted to a TimedeltaIndex
"""
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False
def _is_convertible_to_td(key):
return isinstance(key, (DateOffset, timedelta, Timedelta,
np.timedelta64, compat.string_types))
def _to_m8(key):
"""
Timedelta-like => m8 (timedelta64)
"""
if not isinstance(key, Timedelta):
# this also converts strings
key = Timedelta(key)
# return a type that can be compared
return np.int64(key.value).view(_TD_DTYPE)
def _generate_regular_range(start, end, periods, offset):
stride = offset.nanos
if periods is None:
b = Timedelta(start).value
e = Timedelta(end).value
e += stride - e % stride
elif start is not None:
b = Timedelta(start).value
e = b + periods * stride
elif end is not None:
e = Timedelta(end).value + stride
b = e - periods * stride
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = TimedeltaIndex._simple_new(data, None)
return data
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
Return a fixed frequency timedelta index, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating dates
end : string or timedelta-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
name : str, default None
Name of the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Returns
-------
rng : TimedeltaIndex
Notes
-----
2 of start, end, or periods must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
freq=freq, name=name,
closed=closed)
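# Illustrative usage of timedelta_range (a sketch, not part of the original module):
# >>> timedelta_range(start='1 day', periods=4, freq='D')
# TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D')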
| gpl-3.0 |
vinhqdang/my_mooc | coursera/advanced_machine_learning_spec/4_nlp/natural-language-processing-master/project/dialogue_manager.py | 1 | 3010 | import os
from sklearn.metrics.pairwise import pairwise_distances_argmin
from chatterbot import ChatBot
from utils import *
class ThreadRanker(object):
def __init__(self, paths):
self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS'])
self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER']
def __load_embeddings_by_tag(self, tag_name):
embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + ".pkl")
thread_ids, thread_embeddings = unpickle_file(embeddings_path)
return thread_ids, thread_embeddings
def get_best_thread(self, question, tag_name):
""" Returns id of the most similar thread for the question.
The search is performed across the threads with a given tag.
"""
thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)
# HINT: you have already implemented a similar routine in the 3rd assignment.
question_vec = #### YOUR CODE HERE ####
best_thread = #### YOUR CODE HERE ####
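# One possible sketch for the two placeholders above (hedged: `question_to_vec` is
# assumed to be the embedding helper exposed by this project's utils, which is
# star-imported but not shown here):
# question_vec = question_to_vec(question, self.word_embeddings,
#                                self.embeddings_dim).reshape(1, -1)
# best_thread = pairwise_distances_argmin(question_vec, thread_embeddings)[0]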
return thread_ids[best_thread]
class DialogueManager(object):
def __init__(self, paths):
print("Loading resources...")
# Intent recognition:
self.intent_recognizer = unpickle_file(paths['INTENT_RECOGNIZER'])
self.tfidf_vectorizer = unpickle_file(paths['TFIDF_VECTORIZER'])
self.ANSWER_TEMPLATE = 'I think its about %s\n This thread might help you: https://stackoverflow.com/questions/%s'
# Goal-oriented part:
self.tag_classifier = unpickle_file(paths['TAG_CLASSIFIER'])
self.thread_ranker = ThreadRanker(paths)
def create_chitchat_bot(self):
"""Initializes self.chitchat_bot with some conversational model."""
# Hint: you might want to create and train chatterbot.ChatBot here.
########################
#### YOUR CODE HERE ####
########################
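# One possible sketch (hedged: chatterbot's training API changed across versions, so
# the trainer wiring below is an assumption, not the course's reference solution):
# self.chitchat_bot = ChatBot('StackOverflow Assistant',
#                             trainer='chatterbot.trainers.ChatterBotCorpusTrainer')
# self.chitchat_bot.train('chatterbot.corpus.english')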
def generate_answer(self, question):
"""Combines stackoverflow and chitchat parts using intent recognition."""
# Recognize intent of the question using `intent_recognizer`.
# Don't forget to prepare question and calculate features for the question.
prepared_question = #### YOUR CODE HERE ####
features = #### YOUR CODE HERE ####
intent = #### YOUR CODE HERE ####
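# One possible sketch for the three placeholders above (hedged: `text_prepare` is
# assumed to come from this project's utils module):
# prepared_question = text_prepare(question)
# features = self.tfidf_vectorizer.transform([prepared_question])
# intent = self.intent_recognizer.predict(features)[0]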
# Chit-chat part:
if intent == 'dialogue':
# Pass question to chitchat_bot to generate a response.
response = #### YOUR CODE HERE ####
return response
# Goal-oriented part:
else:
# Pass features to tag_classifier to get predictions.
tag = #### YOUR CODE HERE ####
# Pass prepared_question to thread_ranker to get predictions.
thread_id = #### YOUR CODE HERE ####
return self.ANSWER_TEMPLATE % (tag, thread_id)
| mit |
czhengsci/veidt | veidt/metrics.py | 1 | 1250 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from sklearn.metrics import mean_squared_error, mean_absolute_error
from veidt.utils.general_utils import deserialize_veidt_object
from veidt.utils.general_utils import serialize_veidt_object
def binary_accuracy(y_true, y_pred):
return np.mean(np.array(y_true).ravel() == np.array(y_pred).ravel())
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
def serialize(metric):
return serialize_veidt_object(metric)
def deserialize(config):
return deserialize_veidt_object(config,
module_objects=globals(),
printable_module_name='metric function')
def get(identifier):
if isinstance(identifier, dict):
config = {'class_name': identifier['class_name'], 'config': identifier['config']}
return deserialize(config)
elif isinstance(identifier, six.string_types):
return deserialize(str(identifier))
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'metric function identifier:', identifier)
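# Usage sketch (illustrative; assumes deserialize_veidt_object does the usual
# Keras-style lookup against this module's globals):
# get('mae') # -> mean_absolute_error (resolved by name)
# get(binary_accuracy) # -> binary_accuracy (callables pass through unchanged)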
| bsd-3-clause |
pepper-johnson/Erudition | Thesis/Processing/Pipeline/reddit_slim_comments.py | 1 | 1815 | import json
import datetime
import pandas as pd
# ***********
# Methods:
# ***********
def get_config(config_file):
assert type(config_file) == str
with open(config_file) as f:
config = json.load(f)
return config
# ********
# Main:
# - purpose: take all reddit comment files that were produced by the bot and slim/sort them.
# prepare comments for edge processing.
# ********
config = get_config('reddit_slim_comments.json')
directory = config['directory']
subreddit = config['subreddit']
r = config['range']
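# Expected shape of reddit_slim_comments.json, inferred from the keys read above
# (paths and values are placeholders):
# {"directory": "D:/data/reddit/", "subreddit": "askscience", "range": [1, 12]}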
print()
print("configurations:")
print("config", str(config))
print()
print("Starting at", str(datetime.datetime.now()))
print()
all_comments = [ ]
for index in range(r[0], r[1]+1):
file = subreddit + '_comments_' + str(index) + '.csv'
df = pd.read_csv(directory + file, index_col='index', header=0, low_memory=False)
df['name'] = df['name'].astype(str)
df['link_id'] = df['link_id'].astype(str)
df['body'] = df['body'].astype(str)
df['author'] = df['author'].fillna('[deleted]').astype(str)
comments = [
{
'commentId' : row['name'],
'postId' : row['link_id'],
'body' : row['body'],
'score' : row['score'],
'author' : row['author'],
'created_utc' : row['created_utc']
}
for index, row in df.iterrows()
]
for comment in comments:
all_comments.append(comment)
print('finished adding file:', file, 'at', str(datetime.datetime.now()))
print('moving to dataframe at', str(datetime.datetime.now()))
df_comments = pd.DataFrame(all_comments).set_index('commentId').sort_values(['postId', 'created_utc'])
df_comments.to_csv(directory + r'slim_sorted_comments.csv', header=True)
print("Completed at", str(datetime.datetime.now())) | apache-2.0 |
henrykironde/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/pylab_examples/fill_between_demo.py | 6 | 2116 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, (ax1, ax2, ax3) = plt.subplots(3,1, sharex=True)
ax1.fill_between(x, 0, y1)
ax1.set_ylabel('between y1 and 0')
ax2.fill_between(x, y1, 1)
ax2.set_ylabel('between y1 and 1')
ax3.fill_between(x, y1, y2)
ax3.set_ylabel('between y1 and y2')
ax3.set_xlabel('x')
# now fill between y1 and y2 where a logical condition is met. Note
# this is different than calling
# fill_between(x[where], y1[where],y2[where]
# because of edge effects over multiple contiguous regions.
fig, (ax, ax1) = plt.subplots(2, 1, sharex=True)
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
ax.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
ax.set_title('fill between where')
# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
ax1.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
ax1.set_title('Now regions with y2>1 are masked')
# This example illustrates a problem; because of the data
# gridding, there are undesired unfilled triangles at the crossover
# points. A brute-force solution would be to interpolate all
# arrays to a very fine grid before plotting.
# show how to use transforms to create axes spans where a certain condition is satisfied
fig, ax = plt.subplots()
y = np.sin(4*np.pi*x)
ax.plot(x, y, color='black')
# use the data coordinates for the x-axis and the axes coordinates for the y-axis
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.axhline(theta, color='green', lw=2, alpha=0.5)
ax.axhline(-theta, color='red', lw=2, alpha=0.5)
ax.fill_between(x, 0, 1, where=y>theta, facecolor='green', alpha=0.5, transform=trans)
ax.fill_between(x, 0, 1, where=y<-theta, facecolor='red', alpha=0.5, transform=trans)
plt.show()
| mit |
equialgo/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
jjx02230808/project0223 | examples/manifold/plot_mds.py | 45 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
muneebalam/scrapenhl2 | scrapenhl2/scrape/schedules.py | 1 | 15634 | """
This module contains methods related to season schedules.
"""
import arrow
import datetime
import functools
import json
import os.path
import urllib.request
import feather
import pandas as pd
import scrapenhl2.scrape.general_helpers as helpers
import scrapenhl2.scrape.organization as organization
import scrapenhl2.scrape.team_info as team_info
def _get_current_season():
"""
Runs at import only. Sets current season as today's year minus 1, or today's year if it's September or later
:return: int, current season
"""
date = arrow.now()
season = date.year - 1
if date.month >= 9:
season += 1
return season
def get_current_season():
"""
Returns the current season.
:return: The current season variable (generated at import from _get_current_season)
"""
return _CURRENT_SEASON
def get_season_schedule_filename(season):
"""
Gets the filename for the season's schedule file
:param season: int, the season
:return: str, /scrape/data/other/[season]_schedule.feather
"""
return os.path.join(organization.get_other_data_folder(), '{0:d}_schedule.feather'.format(season))
def get_season_schedule(season):
"""
Gets the season's schedule file from memory.
:param season: int, the season
:return: dataframe (originally from /scrape/data/other/[season]_schedule.feather)
"""
return _SCHEDULES[season]
def get_team_schedule(season=None, team=None, startdate=None, enddate=None):
"""
Gets the schedule for given team in given season. Or if startdate and enddate are specified, searches between
those dates. If season and startdate (and/or enddate) are specified, searches that season between those dates.
:param season: int, the season
:param team: int or str, the team
:param startdate: str, YYYY-MM-DD
:param enddate: str, YYYY-MM-DD
:return: dataframe
"""
# TODO handle case when only team and startdate, or only team and enddate, are given
if season is not None:
df = get_season_schedule(season).query('Status != "Scheduled"')
if startdate is not None:
df = df.query('Date >= "{0:s}"'.format(startdate))
if enddate is not None:
df = df.query('Date <= "{0:s}"'.format(enddate))
tid = team_info.team_as_id(team)
return df[(df.Home == tid) | (df.Road == tid)]
if startdate is not None and enddate is not None:
dflst = []
startseason = helpers.infer_season_from_date(startdate)
endseason = helpers.infer_season_from_date(enddate)
for season in range(startseason, endseason + 1):
df = get_team_schedule(season, team) \
.query('Status != "Scheduled"') \
.assign(Season=season)
if season == startseason:
df = df.query('Date >= "{0:s}"'.format(startdate))
if season == endseason:
df = df.query('Date <= "{0:s}"'.format(enddate))
dflst.append(df)
df = pd.concat(dflst)
return df
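# Illustrative calls (a sketch based on the docstring above, not from the original file):
# get_team_schedule(2017, 'WSH') # one team's 2017-18 games
# get_team_schedule(team='WSH', startdate='2017-10-01', enddate='2017-11-01')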
def get_team_games(season=None, team=None, startdate=None, enddate=None):
"""
Returns list of games played by team in season.
Just calls get_team_schedule with the provided arguments, returning the series of games from that dataframe.
:param season: int, the season
:param team: int or str, the team
:param startdate: str or None
:param enddate: str or None
:return: series of games
"""
return get_team_schedule(season, team, startdate, enddate).Game
def _get_season_schedule(season):
"""
Gets the season's schedule file. Stored as a feather file for fast read/write
:param season: int, the season
:return: dataframe from /scrape/data/other/[season]_schedule.feather
"""
return feather.read_dataframe(get_season_schedule_filename(season))
def write_season_schedule(df, season, force_overwrite):
"""
A helper method that writes the season schedule file to disk (in feather format for fast read/write)
:param df: the season schedule dataframe
:param season: the season
:param force_overwrite: bool. If True, overwrites entire file. If False, only redoes when not Final previously.
:return: Nothing
"""
if force_overwrite: # Easy--just write it
feather.write_dataframe(df, get_season_schedule_filename(season))
else: # Only write new games/previously unfinished games
olddf = get_season_schedule(season)
olddf = olddf.query('Status != "Final"')
# TODO: Maybe in the future set status for games partially scraped as "partial" or something
game_diff = set(df.Game).difference(olddf.Game)
where_diff = df.Game.isin(game_diff)
newdf = pd.concat([olddf, df[where_diff]], ignore_index=True)
feather.write_dataframe(newdf, get_season_schedule_filename(season))
schedule_setup()
def clear_caches():
"""
Clears caches for methods in this module.
:return:
"""
get_game_data_from_schedule.cache_clear()
@functools.lru_cache(maxsize=1024, typed=False)
def get_game_data_from_schedule(season, game):
"""
This is a helper method that uses the schedule file to isolate information for current game
(e.g. teams involved, coaches, venue, score, etc.)
:param season: int, the season
:param game: int, the game
:return: dict of game data
"""
schedule_item = get_season_schedule(season).query('Game == {0:d}'.format(game)).to_dict(orient='series')
# The output format of above was {colname: np.array[vals]}. Change to {colname: val}
schedule_item = {k: v.values[0] for k, v in schedule_item.items()}
return schedule_item
def get_game_date(season, game):
"""
Returns the date of this game
:param season: int, the game
:param game: int, the season
:return: str
"""
return get_game_data_from_schedule(season, game)['Date']
def get_home_team(season, game, returntype='id'):
"""
Returns the home team from this game
:param season: int, the game
:param game: int, the season
:param returntype: str, 'id' or 'name'
:return: float or str, depending on returntype
"""
home = get_game_data_from_schedule(season, game)['Home']
if returntype.lower() == 'id':
return team_info.team_as_id(home)
else:
return team_info.team_as_str(home)
def get_road_team(season, game, returntype='id'):
"""
Returns the road team from this game
:param season: int, the game
:param game: int, the season
:param returntype: str, 'id' or 'name'
:return: float or str, depending on returntype
"""
road = get_game_data_from_schedule(season, game)['Road']
if returntype.lower() == 'id':
return team_info.team_as_id(road)
else:
return team_info.team_as_str(road)
def get_home_score(season, game):
"""
Returns the home score from this game
:param season: int, the season
:param game: int, the game
:return: int, the score
"""
return int(get_game_data_from_schedule(season, game)['HomeScore'])
def get_road_score(season, game):
"""
Returns the road score from this game
:param season: int, the season
:param game: int, the game
:return: int, the score
"""
return int(get_game_data_from_schedule(season, game)['RoadScore'])
def get_game_status(season, game):
"""
Returns the status of this game (e.g. Final, In Progress)
:param season: int, the season
:param game: int, the game
:return: int, the score
"""
return get_game_data_from_schedule(season, game)['Status']
def get_game_result(season, game):
"""
Returns the result of this game for home team (e.g. W, SOL)
:param season: int, the season
:param game: int, the game
:return: int, the score
"""
return get_game_data_from_schedule(season, game)['Result']
def get_season_schedule_url(season):
"""
Gets the url for a page containing all of this season's games (Sep 1 to Jun 26) from NHL API.
:param season: int, the season
:return: str, https://statsapi.web.nhl.com/api/v1/schedule?startDate=[season]-09-01&endDate=[season+1]-06-25
"""
return 'https://statsapi.web.nhl.com/api/v1/schedule?startDate=' \
'{0:d}-09-01&endDate={1:d}-06-25'.format(season, season + 1)
def get_teams_in_season(season):
"""
Returns all teams that have a game in the schedule for this season
:param season: int, the season
:return: set of team IDs
"""
sch = get_season_schedule(season)
allteams = set(sch.Road).union(sch.Home)
return set(allteams)
def check_valid_game(season, game):
"""
Checks if gameid in season schedule.
:param season: int, season
:param game: int, game
:return: bool
"""
try:
get_game_status(season, game)
return True
except IndexError:
return False
def schedule_setup():
"""
Reads current season and schedules into memory.
:return: nothing
"""
clear_caches()
global _SCHEDULES, _CURRENT_SEASON
_CURRENT_SEASON = _get_current_season()
for season in range(2005, get_current_season() + 1):
if not os.path.exists(get_season_schedule_filename(season)):
generate_season_schedule_file(season) # season schedule
# There is a potential issue here for current season.
# For current season, we'll update this as we go along.
# But original creation first time you start up in a new season is automatic, here.
# When we autoupdate season date, we need to make sure to re-access this file and add in new entries
_SCHEDULES = {season: _get_season_schedule(season) for season in range(2005, _CURRENT_SEASON + 1)}
def generate_season_schedule_file(season, force_overwrite=True):
"""
Reads season schedule from NHL API and writes to file.
The output contains the following columns:
- Season: int, the season
- Date: str, the dates
- Game: int, the game id
- Type: str, the game type (for preseason vs regular season, etc)
- Status: str, e.g. Final
- Road: int, the road team ID
- RoadScore: int, number of road team goals
- RoadCoach: str, 'N/A' when this function is run (edited later with road coach name)
- Home: int, the home team ID
- HomeScore: int, number of home team goals
- HomeCoach: str, 'N/A' when this function is run (edited later with home coach name)
- Venue: str, the name of the arena
- Result: str, 'N/A' when this function is run (edited accordingly later from PoV of home team: W, OTW, SOL, etc)
- PBPStatus: str, 'Not scraped' when this function is run (edited accordingly later)
- TOIStatus: str, 'Not scraped' when this function is run (edited accordingly later)
:param season: int, the season
:param force_overwrite: bool. If True, generates entire file from scratch.
If False, only redoes when not Final previously.
:return: Nothing
"""
page = helpers.try_url_n_times(get_season_schedule_url(season))
page2 = json.loads(page)
df = _create_schedule_dataframe_from_json(page2)
df.loc[:, 'Season'] = season
# Last step: we fill in some info from the pbp. If current schedule already exists, fill in that info.
df = _fill_in_schedule_from_pbp(df, season)
write_season_schedule(df, season, force_overwrite)
clear_caches()
def _create_schedule_dataframe_from_json(jsondict):
"""
Reads game, game type, status, visitor ID, home ID, visitor score, and home score for each game in this dict
:param jsondict: a dictionary formed from season schedule json
:return: pandas dataframe
"""
dates = []
games = []
gametypes = []
statuses = []
vids = []
vscores = []
hids = []
hscores = []
venues = []
for datejson in jsondict['dates']:
try:
date = datejson.get('date', None)
for gamejson in datejson['games']:
game = int(str(helpers.try_to_access_dict(gamejson, 'gamePk'))[-5:])
gametype = helpers.try_to_access_dict(gamejson, 'gameType')
status = helpers.try_to_access_dict(gamejson, 'status', 'detailedState')
vid = helpers.try_to_access_dict(gamejson, 'teams', 'away', 'team', 'id')
vscore = int(helpers.try_to_access_dict(gamejson, 'teams', 'away', 'score'))
hid = helpers.try_to_access_dict(gamejson, 'teams', 'home', 'team', 'id')
hscore = int(helpers.try_to_access_dict(gamejson, 'teams', 'home', 'score'))
venue = helpers.try_to_access_dict(gamejson, 'venue', 'name')
dates.append(date)
games.append(game)
gametypes.append(gametype)
statuses.append(status)
vids.append(vid)
vscores.append(vscore)
hids.append(hid)
hscores.append(hscore)
venues.append(venue)
except KeyError:
pass
df = pd.DataFrame({'Date': dates,
'Game': games,
'Type': gametypes,
'Status': statuses,
'Road': vids,
'RoadScore': vscores,
'Home': hids,
'HomeScore': hscores,
'Venue': venues}).sort_values('Game')
return df
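# Illustrative sketch (not part of the original module): the minimal JSON shape
# this parser expects, using the field names accessed above. All values are
# hypothetical examples.
#
# jsondict = {'dates': [
#     {'date': '2017-10-04',
#      'games': [{'gamePk': 2017020001,
#                 'gameType': 'R',
#                 'status': {'detailedState': 'Final'},
#                 'teams': {'away': {'team': {'id': 10}, 'score': 7},
#                           'home': {'team': {'id': 19}, 'score': 2}},
#                 'venue': {'name': 'Example Arena'}}]}]}
#
# _create_schedule_dataframe_from_json(jsondict) would then yield a one-row
# dataframe with Game=20001 (last five digits of gamePk), Road=10, RoadScore=7,
# Home=19, HomeScore=2, Status='Final' and Venue='Example Arena'.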
def _fill_in_schedule_from_pbp(df, season):
"""
Fills in columns for coaches, result, pbp status, and toi status as N/A, not scraped, etc.
Use methods prefixed with update_schedule to actually fill in with correct values.
:param df: dataframe, season schedule dataframe as created by _create_schedule_dataframe_from_json
:param season: int, the season
:return: df, with coaches, result, and status filled in
"""
if os.path.exists(get_season_schedule_filename(season)):
# only final games--this way pbp status and toistatus will be ok.
cur_season = get_season_schedule(season).query('Status == "Final"')
cur_season = cur_season[['Season', 'Game', 'HomeCoach', 'RoadCoach', 'Result', 'PBPStatus', 'TOIStatus']]
df = df.merge(cur_season, how='left', on=['Season', 'Game'])
# Fill in NAs
df.loc[:, 'Season'] = df.Season.fillna(season)
df.loc[:, 'HomeCoach'] = df.HomeCoach.fillna('N/A')
df.loc[:, 'RoadCoach'] = df.RoadCoach.fillna('N/A')
df.loc[:, 'Result'] = df.Result.fillna('N/A')
df.loc[:, 'PBPStatus'] = df.PBPStatus.fillna('Not scraped')
df.loc[:, 'TOIStatus'] = df.TOIStatus.fillna('Not scraped')
else:
df.loc[:, 'HomeCoach'] = 'N/A' # Tried to set this to None earlier, but Arrow couldn't handle it, so 'N/A'
df.loc[:, 'RoadCoach'] = 'N/A'
df.loc[:, 'Result'] = 'N/A'
df.loc[:, 'PBPStatus'] = 'Not scraped'
df.loc[:, 'TOIStatus'] = 'Not scraped'
return df
def attach_game_dates_to_dateframe(df):
"""
Takes dataframe with Season and Game columns and adds a Date column (for that game)
:param df: dataframe
:return: dataframe with one more column
"""
dflst = []
for season in df.Season.unique():
temp = df.query("Season == {0:d}".format(int(season))) \
.merge(get_season_schedule(season)[['Game', 'Date']], how='left', on='Game')
dflst.append(temp)
df2 = pd.concat(dflst)
return df2
_CURRENT_SEASON = None
_SCHEDULES = None
schedule_setup()
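# Illustrative usage sketch (not part of the original module), kept as comments
# so the module still only runs schedule_setup() on import. The season and game
# numbers below are hypothetical.
#
# sch = get_season_schedule(2017)                       # cached schedule dataframe
# if check_valid_game(2017, 20001):
#     print(get_game_status(2017, 20001))               # e.g. 'Final'
# df = attach_game_dates_to_dateframe(sch[['Season', 'Game']].head())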
| mit |
Odingod/mne-python | mne/io/fiff/tests/test_raw.py | 1 | 38869 | from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
from copy import deepcopy
import warnings
import itertools as itt
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import Raw, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
@slow_test
def test_concat():
"""Test RawFIF concatenation"""
_test_concat(read_raw_fif, test_fif_fname)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects
"""
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = Raw(fif_fname).crop(0, 0.5, False)
raw.preload_data()
raw_2 = Raw(fif_fname).crop(0, 0.5, False)
raw_2.preload_data()
assert_equal(hash(raw), hash(raw_2))
# do NOT use assert_equal here, failing output is terrible
assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
raw_2._data[0, 0] -= 1
assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_subject_info():
"""Test reading subject information
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 1, False)
assert_true(raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for key in keys:
assert_equal(subject_info[key], raw_read.info['subject_info'][key])
raw_read.anonymize()
assert_true(raw_read.info.get('subject_info') is None)
out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
raw_read.save(out_fname_anon, overwrite=True)
raw_read = Raw(out_fname_anon)
assert_true(raw_read.info.get('subject_info') is None)
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations
"""
raw = Raw(fif_fname, preload=True).copy()
raw_full = Raw(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert_equal(data.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
"""Test raw rank estimation
"""
iter_tests = itt.product(
[fif_fname, hp_fif_fname], # sss
['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
)
for fname, scalings in iter_tests:
raw = Raw(fname)
(_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
meg_combined=True)
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
raw = Raw(fname, preload=True)
if 'proc_history' not in raw.info:
expected_rank = n_meg + n_eeg
else:
mf = raw.info['proc_history'][0]['max_info']
expected_rank = _get_sss_rank(mf) + n_eeg
assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
assert_array_equal(raw.estimate_rank(picks=picks_eeg,
scalings=scalings),
n_eeg)
raw = Raw(fname, preload=False)
if 'sss' in fname:
tstart, tstop = 0., 30.
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
else:
tstart, tstop = 10., 20.
raw.apply_proj()
n_proj = len(raw.info['projs'])
assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
scalings=scalings),
expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
"""Test saving and loading raw data using multiple formats
"""
tempdir = _TempDir()
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = Raw(test_fif_fname).crop(0, 1, copy=False)
temp_file = op.join(tempdir, 'raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
# Let's test the overwriting error throwing while we're at it
if ii > 0:
assert_raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = Raw(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
"""Test loading multiple files simultaneously
"""
# split file
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 10, False)
raw.preload_data()
raw.preload_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert_equal(raw.n_times, len(raw.times))
# going in reverse order so the last fname is the first file (need later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = Raw(fname)
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_equal(raw.first_samp, all_raw_1.first_samp)
assert_equal(raw.last_samp, all_raw_1.last_samp)
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = Raw(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
events2 = concatenate_events(events, first_samps, last_samps)
events3 = find_events(all_raw_2, stim_channel='STI 014')
assert_array_equal(events1, events2)
assert_array_equal(events1, events3)
# test various methods of combining files
raw = Raw(fif_fname, preload=True)
n_times = raw.n_times
# make sure that all our data match
times = list(range(0, 2 * n_times, 999))
# add potentially problematic points
times.extend([n_times - 1, n_times, 2 * n_times - 1])
raw_combo0 = Raw([fif_fname, fif_fname], preload=True)
_compare_combo(raw, raw_combo0, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload=False)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = Raw([fif_fname, fif_fname], preload='memmap8.dat')
_compare_combo(raw, raw_combo, times, n_times)
assert_raises(ValueError, Raw, [fif_fname, ctf_fname])
assert_raises(ValueError, Raw, [fif_fname, fif_bad_marked_fname])
assert_equal(raw[:, :][0].shape[1] * 2, raw_combo0[:, :][0].shape[1])
assert_equal(raw_combo0[:, :][0].shape[1], raw_combo0.n_times)
# with all data preloaded, result should be preloaded
raw_combo = Raw(fif_fname, preload=True)
raw_combo.append(Raw(fif_fname, preload=True))
assert_true(raw_combo.preload is True)
assert_equal(raw_combo.n_times, raw_combo._data.shape[1])
_compare_combo(raw, raw_combo, times, n_times)
# with any data not preloaded, don't set result as preloaded
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=False)])
assert_true(raw_combo.preload is False)
assert_array_equal(find_events(raw_combo, stim_channel='STI 014'),
find_events(raw_combo0, stim_channel='STI 014'))
_compare_combo(raw, raw_combo, times, n_times)
# user should be able to force data to be preloaded upon concat
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload=True)
assert_true(raw_combo.preload is True)
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=True)],
preload='memmap3.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=True),
Raw(fif_fname, preload=True)],
preload='memmap4.dat')
_compare_combo(raw, raw_combo, times, n_times)
raw_combo = concatenate_raws([Raw(fif_fname, preload=False),
Raw(fif_fname, preload=False)],
preload='memmap5.dat')
_compare_combo(raw, raw_combo, times, n_times)
# verify that combining raws with different projectors throws an exception
raw.add_proj([], remove_existing=True)
assert_raises(ValueError, raw.append, Raw(fif_fname, preload=True))
# now test event treatment for concatenated raw files
events = [find_events(raw, stim_channel='STI 014'),
find_events(raw, stim_channel='STI 014')]
last_samps = [raw.last_samp, raw.last_samp]
first_samps = [raw.first_samp, raw.first_samp]
events = concatenate_events(events, first_samps, last_samps)
events2 = find_events(raw_combo0, stim_channel='STI 014')
assert_array_equal(events, events2)
# check out the len method
assert_equal(len(raw), raw.n_times)
assert_equal(len(raw), raw.last_samp - raw.first_samp + 1)
@testing.requires_testing_data
def test_split_files():
"""Test writing and reading of split raw files
"""
tempdir = _TempDir()
raw_1 = Raw(fif_fname, preload=True)
split_fname = op.join(tempdir, 'split_raw.fif')
raw_1.save(split_fname, buffer_size_sec=1.0, split_size='10MB')
raw_2 = Raw(split_fname)
data_1, times_1 = raw_1[:, :]
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
# test the case where the silly user specifies the split files
fnames = [split_fname]
fnames.extend(sorted(glob.glob(op.join(tempdir, 'split_raw-*.fif'))))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_2 = Raw(fnames)
data_2, times_2 = raw_2[:, :]
assert_array_equal(data_1, data_2)
assert_array_equal(times_1, times_2)
def test_load_bad_channels():
"""Test reading/writing of bad channels
"""
tempdir = _TempDir()
# Load correctly marked file (manually done in mne_process_raw)
raw_marked = Raw(fif_bad_marked_fname)
correct_bads = raw_marked.info['bads']
raw = Raw(test_fif_fname)
# Make sure it starts clean
assert_array_equal(raw.info['bads'], [])
# Test normal case
raw.load_bad_channels(bad_file_works)
# Write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'))
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Reset it
raw.info['bads'] = []
# Test bad case
assert_raises(ValueError, raw.load_bad_channels, bad_file_wrong)
# Test forcing the bad case
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw.load_bad_channels(bad_file_wrong, force=True)
n_found = sum(['1 bad channel' in str(ww.message) for ww in w])
assert_equal(n_found, 1) # there could be other irrelevant errors
# write it out, read it in, and check
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal(correct_bads, raw_new.info['bads'])
# Check that bad channels are cleared
raw.load_bad_channels(None)
raw.save(op.join(tempdir, 'foo_raw.fif'), overwrite=True)
raw_new = Raw(op.join(tempdir, 'foo_raw.fif'))
assert_equal([], raw_new.info['bads'])
@slow_test
@testing.requires_testing_data
def test_io_raw():
"""Test IO for raw data (Neuromag + CTF + gz)
"""
tempdir = _TempDir()
# test unicode io
for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']:
with Raw(fif_fname) as r:
assert_true('Raw' in repr(r))
desc1 = r.info['description'] = chars.decode('utf-8')
temp_file = op.join(tempdir, 'raw.fif')
r.save(temp_file, overwrite=True)
with Raw(temp_file) as r2:
desc2 = r2.info['description']
assert_equal(desc1, desc2)
# Let's construct a simple test for IO first
raw = Raw(fif_fname).crop(0, 3.5, False)
raw.preload_data()
# put in some data that we know the values of
data = np.random.randn(raw._data.shape[0], raw._data.shape[1])
raw._data[:, :] = data
# save it somewhere
fname = op.join(tempdir, 'test_copy_raw.fif')
raw.save(fname, buffer_size_sec=1.0)
# read it in, make sure the whole thing matches
raw = Raw(fname)
assert_allclose(data, raw[:, :][0], rtol=1e-6, atol=1e-20)
# let's read portions across the 1-sec tag boundary, too
inds = raw.time_as_index([1.75, 2.25])
sl = slice(inds[0], inds[1])
assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20)
# now let's do some real I/O
fnames_in = [fif_fname, test_fif_gz_fname, ctf_fname]
fnames_out = ['raw.fif', 'raw.fif.gz', 'raw.fif']
for fname_in, fname_out in zip(fnames_in, fnames_out):
fname_out = op.join(tempdir, fname_out)
raw = Raw(fname_in)
nchan = raw.info['nchan']
ch_names = raw.info['ch_names']
meg_channels_idx = [k for k in range(nchan)
if ch_names[k][0] == 'M']
n_channels = 100
meg_channels_idx = meg_channels_idx[:n_channels]
start, stop = raw.time_as_index([0, 5])
data, times = raw[meg_channels_idx, start:(stop + 1)]
meg_ch_names = [ch_names[k] for k in meg_channels_idx]
# Set up pick list: MEG + STI 014 - bad channels
include = ['STI 014']
include += meg_ch_names
picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
misc=True, ref_meg=True, include=include,
exclude='bads')
# Writing with drop_small_buffer True
raw.save(fname_out, picks, tmin=0, tmax=4, buffer_size_sec=3,
drop_small_buffer=True, overwrite=True)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_true(times2.max() <= 3)
# Writing
raw.save(fname_out, picks, tmin=0, tmax=5, overwrite=True)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_equal(len(raw.info['dig']), 146)
raw2 = Raw(fname_out)
sel = pick_channels(raw2.ch_names, meg_ch_names)
data2, times2 = raw2[sel, :]
assert_allclose(data, data2, rtol=1e-6, atol=1e-20)
assert_allclose(times, times2)
assert_allclose(raw.info['sfreq'], raw2.info['sfreq'], rtol=1e-5)
# check transformations
for trans in ['dev_head_t', 'dev_ctf_t', 'ctf_head_t']:
if raw.info[trans] is None:
assert_true(raw2.info[trans] is None)
else:
assert_array_equal(raw.info[trans]['trans'],
raw2.info[trans]['trans'])
# check transformation 'from' and 'to'
if trans.startswith('dev'):
from_id = FIFF.FIFFV_COORD_DEVICE
else:
from_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
if trans[4:8] == 'head':
to_id = FIFF.FIFFV_COORD_HEAD
else:
to_id = FIFF.FIFFV_MNE_COORD_CTF_HEAD
for raw_ in [raw, raw2]:
assert_equal(raw_.info[trans]['from'], from_id)
assert_equal(raw_.info[trans]['to'], to_id)
if fname_in == fif_fname or fname_in == fif_fname + '.gz':
assert_allclose(raw.info['dig'][0]['r'], raw2.info['dig'][0]['r'])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
raw_badname = op.join(tempdir, 'test-bad-name.fif.gz')
raw.save(raw_badname)
Raw(raw_badname)
assert_true(len(w) > 0) # len(w) should be 2 but Travis sometimes has more
@testing.requires_testing_data
def test_io_complex():
"""Test IO with complex data types
"""
tempdir = _TempDir()
dtypes = [np.complex64, np.complex128]
raw = Raw(fif_fname, preload=True)
picks = np.arange(5)
start, stop = raw.time_as_index([0, 5])
data_orig, _ = raw[picks, start:stop]
for di, dtype in enumerate(dtypes):
imag_rand = np.array(1j * np.random.randn(data_orig.shape[0],
data_orig.shape[1]), dtype)
raw_cp = raw.copy()
raw_cp._data = np.array(raw_cp._data, dtype)
raw_cp._data[picks, start:stop] += imag_rand
# this should throw an error because it's complex
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
raw_cp.save(op.join(tempdir, 'raw.fif'), picks, tmin=0, tmax=5,
overwrite=True)
        # warning gets thrown on every instance b/c simplefilter('always')
assert_equal(len(w), 1)
raw2 = Raw(op.join(tempdir, 'raw.fif'))
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
# with preloading
raw2 = Raw(op.join(tempdir, 'raw.fif'), preload=True)
raw2_data, _ = raw2[picks, :]
n_samp = raw2_data.shape[1]
assert_allclose(raw2_data[:, :n_samp], raw_cp._data[picks, :n_samp])
@testing.requires_testing_data
def test_getitem():
"""Test getitem/indexing of Raw
"""
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
data, times = raw[0, :]
data1, times1 = raw[0]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
data1, times1 = raw[[0, 1]]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
@testing.requires_testing_data
def test_proj():
"""Test SSP proj operations
"""
tempdir = _TempDir()
for proj in [True, False]:
raw = Raw(fif_fname, preload=False, proj=proj)
assert_true(all(p['active'] == proj for p in raw.info['projs']))
data, times = raw[0:2, :]
data1, times1 = raw[0:2]
assert_array_equal(data, data1)
assert_array_equal(times, times1)
# test adding / deleting proj
if proj:
assert_raises(ValueError, raw.add_proj, [],
{'remove_existing': True})
assert_raises(ValueError, raw.del_proj, 0)
else:
projs = deepcopy(raw.info['projs'])
n_proj = len(raw.info['projs'])
raw.del_proj(0)
assert_equal(len(raw.info['projs']), n_proj - 1)
raw.add_proj(projs, remove_existing=False)
assert_equal(len(raw.info['projs']), 2 * n_proj - 1)
raw.add_proj(projs, remove_existing=True)
assert_equal(len(raw.info['projs']), n_proj)
# test apply_proj() with and without preload
for preload in [True, False]:
raw = Raw(fif_fname, preload=preload, proj=False)
data, times = raw[:, 0:2]
raw.apply_proj()
data_proj_1 = np.dot(raw._projector, data)
# load the file again without proj
raw = Raw(fif_fname, preload=preload, proj=False)
# write the file with proj. activated, make sure proj has been applied
raw.save(op.join(tempdir, 'raw.fif'), proj=True, overwrite=True)
raw2 = Raw(op.join(tempdir, 'raw.fif'), proj=False)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# read orig file with proj. active
raw2 = Raw(fif_fname, preload=preload, proj=True)
data_proj_2, _ = raw2[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_true(all(p['active'] for p in raw2.info['projs']))
# test that apply_proj works
raw.apply_proj()
data_proj_2, _ = raw[:, 0:2]
assert_allclose(data_proj_1, data_proj_2)
assert_allclose(data_proj_2, np.dot(raw._projector, data_proj_2))
tempdir = _TempDir()
out_fname = op.join(tempdir, 'test_raw.fif')
raw = read_raw_fif(test_fif_fname, preload=True).crop(0, 0.002, copy=False)
raw.pick_types(meg=False, eeg=True)
raw.info['projs'] = [raw.info['projs'][-1]]
raw._data.fill(0)
raw._data[-1] = 1.
raw.save(out_fname)
raw = read_raw_fif(out_fname, proj=True, preload=False)
assert_allclose(raw[:, :][0][:1], raw[0, :][0])
@testing.requires_testing_data
def test_preload_modify():
"""Test preloading and modifying data
"""
tempdir = _TempDir()
for preload in [False, True, 'memmap.dat']:
raw = Raw(fif_fname, preload=preload)
nsamp = raw.last_samp - raw.first_samp + 1
picks = pick_types(raw.info, meg='grad', exclude='bads')
data = np.random.randn(len(picks), nsamp // 2)
try:
raw[picks, :nsamp // 2] = data
except RuntimeError as err:
if not preload:
continue
else:
raise err
tmp_fname = op.join(tempdir, 'raw.fif')
raw.save(tmp_fname, overwrite=True)
raw_new = Raw(tmp_fname)
        data_new, _ = raw_new[picks, :nsamp // 2]
assert_allclose(data, data_new)
@slow_test
@testing.requires_testing_data
def test_filter():
"""Test filtering (FIR and IIR) and Raw.apply_function interface
"""
raw = Raw(fif_fname).crop(0, 7, False)
raw.preload_data()
sig_dec = 11
sig_dec_notch = 12
sig_dec_notch_fit = 12
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw_lp = raw.copy()
raw_lp.filter(0., 4.0 - 0.25, picks=picks, n_jobs=2)
raw_hp = raw.copy()
raw_hp.filter(8.0 + 0.25, None, picks=picks, n_jobs=2)
raw_bp = raw.copy()
raw_bp.filter(4.0 + 0.25, 8.0 - 0.25, picks=picks)
raw_bs = raw.copy()
raw_bs.filter(8.0 + 0.25, 4.0 - 0.25, picks=picks, n_jobs=2)
data, _ = raw[picks, :]
lp_data, _ = raw_lp[picks, :]
hp_data, _ = raw_hp[picks, :]
bp_data, _ = raw_bp[picks, :]
bs_data, _ = raw_bs[picks, :]
assert_array_almost_equal(data, lp_data + bp_data + hp_data, sig_dec)
assert_array_almost_equal(data, bp_data + bs_data, sig_dec)
raw_lp_iir = raw.copy()
raw_lp_iir.filter(0., 4.0, picks=picks, n_jobs=2, method='iir')
raw_hp_iir = raw.copy()
raw_hp_iir.filter(8.0, None, picks=picks, n_jobs=2, method='iir')
raw_bp_iir = raw.copy()
raw_bp_iir.filter(4.0, 8.0, picks=picks, method='iir')
lp_data_iir, _ = raw_lp_iir[picks, :]
hp_data_iir, _ = raw_hp_iir[picks, :]
bp_data_iir, _ = raw_bp_iir[picks, :]
summation = lp_data_iir + hp_data_iir + bp_data_iir
assert_array_almost_equal(data[:, 100:-100], summation[:, 100:-100],
sig_dec)
# make sure we didn't touch other channels
data, _ = raw[picks_meg[4:], :]
bp_data, _ = raw_bp[picks_meg[4:], :]
assert_array_equal(data, bp_data)
bp_data_iir, _ = raw_bp_iir[picks_meg[4:], :]
assert_array_equal(data, bp_data_iir)
# do a very simple check on line filtering
raw_bs = raw.copy()
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
raw_bs.filter(60.0 + 0.5, 60.0 - 0.5, picks=picks, n_jobs=2)
data_bs, _ = raw_bs[picks, :]
raw_notch = raw.copy()
raw_notch.notch_filter(60.0, picks=picks, n_jobs=2, method='fft')
data_notch, _ = raw_notch[picks, :]
assert_array_almost_equal(data_bs, data_notch, sig_dec_notch)
# now use the sinusoidal fitting
raw_notch = raw.copy()
raw_notch.notch_filter(None, picks=picks, n_jobs=2, method='spectrum_fit')
data_notch, _ = raw_notch[picks, :]
data, _ = raw[picks, :]
assert_array_almost_equal(data, data_notch, sig_dec_notch_fit)
@testing.requires_testing_data
def test_crop():
"""Test cropping raw files
"""
# split a concatenated file to test a difficult case
raw = Raw([fif_fname, fif_fname], preload=False)
split_size = 10. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp + 1)
# do an annoying case (off-by-one splitting)
tmins = np.r_[1., np.round(np.arange(0., nsamp - 1, split_size * sfreq))]
tmins = np.sort(tmins)
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.crop(tmin, tmax, True)
all_raw_2 = concatenate_raws(raws, preload=False)
assert_equal(raw.first_samp, all_raw_2.first_samp)
assert_equal(raw.last_samp, all_raw_2.last_samp)
assert_array_equal(raw[:, :][0], all_raw_2[:, :][0])
tmins = np.round(np.arange(0., nsamp - 1, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp - 1]))
tmaxs /= sfreq
tmins /= sfreq
    # going in reverse order so the last fname is the first file (need it later)
raws = [None] * len(tmins)
for ri, (tmin, tmax) in enumerate(zip(tmins, tmaxs)):
raws[ri] = raw.copy()
raws[ri].crop(tmin, tmax, False)
# test concatenation of split file
all_raw_1 = concatenate_raws(raws, preload=False)
all_raw_2 = raw.crop(0, None, True)
for ar in [all_raw_1, all_raw_2]:
assert_equal(raw.first_samp, ar.first_samp)
assert_equal(raw.last_samp, ar.last_samp)
assert_array_equal(raw[:, :][0], ar[:, :][0])
@testing.requires_testing_data
def test_resample():
"""Test resample (with I/O and multiple files)
"""
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 3, False)
raw.preload_data()
raw_resamp = raw.copy()
sfreq = raw.info['sfreq']
# test parallel on upsample
raw_resamp.resample(sfreq * 2, n_jobs=2)
assert_equal(raw_resamp.n_times, len(raw_resamp.times))
raw_resamp.save(op.join(tempdir, 'raw_resamp-raw.fif'))
raw_resamp = Raw(op.join(tempdir, 'raw_resamp-raw.fif'), preload=True)
assert_equal(sfreq, raw_resamp.info['sfreq'] / 2)
assert_equal(raw.n_times, raw_resamp.n_times / 2)
assert_equal(raw_resamp._data.shape[1], raw_resamp.n_times)
assert_equal(raw._data.shape[0], raw_resamp._data.shape[0])
# test non-parallel on downsample
raw_resamp.resample(sfreq, n_jobs=1)
assert_equal(raw_resamp.info['sfreq'], sfreq)
assert_equal(raw._data.shape, raw_resamp._data.shape)
assert_equal(raw.first_samp, raw_resamp.first_samp)
assert_equal(raw.last_samp, raw.last_samp)
# upsampling then downsampling doubles resampling error, but this still
# works (hooray). Note that the stim channels had to be sub-sampled
# without filtering to be accurately preserved
# note we have to treat MEG and EEG+STIM channels differently (tols)
assert_allclose(raw._data[:306, 200:-200],
raw_resamp._data[:306, 200:-200],
rtol=1e-2, atol=1e-12)
assert_allclose(raw._data[306:, 200:-200],
raw_resamp._data[306:, 200:-200],
rtol=1e-2, atol=1e-7)
# now check multiple file support w/resampling, as order of operations
# (concat, resample) should not affect our data
raw1 = raw.copy()
raw2 = raw.copy()
raw3 = raw.copy()
raw4 = raw.copy()
raw1 = concatenate_raws([raw1, raw2])
raw1.resample(10)
raw3.resample(10)
raw4.resample(10)
raw3 = concatenate_raws([raw3, raw4])
assert_array_equal(raw1._data, raw3._data)
assert_array_equal(raw1._first_samps, raw3._first_samps)
assert_array_equal(raw1._last_samps, raw3._last_samps)
assert_array_equal(raw1._raw_lengths, raw3._raw_lengths)
assert_equal(raw1.first_samp, raw3.first_samp)
assert_equal(raw1.last_samp, raw3.last_samp)
assert_equal(raw1.info['sfreq'], raw3.info['sfreq'])
@testing.requires_testing_data
def test_hilbert():
"""Test computation of analytic signal using hilbert
"""
raw = Raw(fif_fname, preload=True)
picks_meg = pick_types(raw.info, meg=True, exclude='bads')
picks = picks_meg[:4]
raw2 = raw.copy()
raw.apply_hilbert(picks)
raw2.apply_hilbert(picks, envelope=True, n_jobs=2)
env = np.abs(raw._data[picks, :])
assert_allclose(env, raw2._data[picks, :], rtol=1e-2, atol=1e-13)
@testing.requires_testing_data
def test_raw_copy():
"""Test Raw copy
"""
raw = Raw(fif_fname, preload=True)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
raw = Raw(fif_fname, preload=False)
data, _ = raw[:, :]
copied = raw.copy()
copied_data, _ = copied[:, :]
assert_array_equal(data, copied_data)
assert_equal(sorted(raw.__dict__.keys()),
sorted(copied.__dict__.keys()))
@requires_pandas
def test_to_data_frame():
"""Test raw Pandas exporter"""
raw = Raw(test_fif_fname, preload=True)
_, times = raw[0, :10]
df = raw.to_data_frame()
assert_true((df.columns == raw.ch_names).all())
assert_array_equal(np.round(times * 1e3), df.index.values[:10])
df = raw.to_data_frame(index=None)
assert_true('time' in df.index.names)
assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
assert_array_equal(df.values[:, 2], raw._data[2] * 1e15)
@testing.requires_testing_data
def test_raw_index_as_time():
""" Test index as time conversion"""
raw = Raw(fif_fname, preload=True)
t0 = raw.index_as_time([0], True)[0]
t1 = raw.index_as_time([100], False)[0]
t2 = raw.index_as_time([100], True)[0]
assert_equal(t2 - t1, t0)
# ensure we can go back and forth
t3 = raw.index_as_time(raw.time_as_index([0], True), True)
assert_array_almost_equal(t3, [0.0], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
i0 = raw.time_as_index(raw.index_as_time([0], True), True)
assert_equal(i0[0], 0)
i1 = raw.time_as_index(raw.index_as_time([100], True), True)
assert_equal(i1[0], 100)
# Have to add small amount of time because we truncate via int casting
i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
assert_equal(i1[0], 100)
@testing.requires_testing_data
def test_raw_time_as_index():
""" Test time as index conversion"""
raw = Raw(fif_fname, preload=True)
first_samp = raw.time_as_index([0], True)[0]
assert_equal(raw.first_samp, -first_samp)
@testing.requires_testing_data
def test_save():
""" Test saving raw"""
tempdir = _TempDir()
raw = Raw(fif_fname, preload=False)
# can't write over file being read
assert_raises(ValueError, raw.save, fif_fname)
raw = Raw(fif_fname, preload=True)
# can't overwrite file without overwrite=True
assert_raises(IOError, raw.save, fif_fname)
# test abspath support
new_fname = op.join(op.abspath(op.curdir), 'break-raw.fif')
raw.save(op.join(tempdir, new_fname), overwrite=True)
new_raw = Raw(op.join(tempdir, new_fname), preload=False)
assert_raises(ValueError, new_raw.save, new_fname)
# make sure we can overwrite the file we loaded when preload=True
new_raw = Raw(op.join(tempdir, new_fname), preload=True)
new_raw.save(op.join(tempdir, new_fname), overwrite=True)
os.remove(new_fname)
@testing.requires_testing_data
def test_with_statement():
""" Test with statement """
for preload in [True, False]:
with Raw(fif_fname, preload=preload) as raw_:
print(raw_)
def test_compensation_raw():
"""Test Raw compensation
"""
tempdir = _TempDir()
raw1 = Raw(ctf_comp_fname, compensation=None)
assert_true(raw1.comp is None)
data1, times1 = raw1[:, :]
raw2 = Raw(ctf_comp_fname, compensation=3)
data2, times2 = raw2[:, :]
assert_true(raw2.comp is None) # unchanged (data come with grade 3)
assert_array_equal(times1, times2)
assert_array_equal(data1, data2)
raw3 = Raw(ctf_comp_fname, compensation=1)
data3, times3 = raw3[:, :]
assert_true(raw3.comp is not None)
assert_array_equal(times1, times3)
# make sure it's different with a different compensation:
assert_true(np.mean(np.abs(data1 - data3)) > 1e-12)
assert_raises(ValueError, Raw, ctf_comp_fname, compensation=33)
# Try IO with compensation
temp_file = op.join(tempdir, 'raw.fif')
raw1.save(temp_file, overwrite=True)
raw4 = Raw(temp_file)
data4, times4 = raw4[:, :]
assert_array_equal(times1, times4)
assert_array_equal(data1, data4)
    # Now save the file that has modified compensation
    # and make sure we get the same data as input, i.e. the compensation
    # is undone
raw3.save(temp_file, overwrite=True)
raw5 = Raw(temp_file)
data5, times5 = raw5[:, :]
assert_array_equal(times1, times5)
assert_allclose(data1, data5, rtol=1e-12, atol=1e-22)
@requires_mne
def test_compensation_raw_mne():
"""Test Raw compensation by comparing with MNE
"""
tempdir = _TempDir()
def compensate_mne(fname, grad):
tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif')
cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname,
'--grad', str(grad), '--projoff', '--filteroff']
run_subprocess(cmd)
return Raw(tmp_fname, preload=True)
for grad in [0, 2, 3]:
raw_py = Raw(ctf_comp_fname, preload=True, compensation=grad)
raw_c = compensate_mne(ctf_comp_fname, grad)
assert_allclose(raw_py._data, raw_c._data, rtol=1e-6, atol=1e-17)
@testing.requires_testing_data
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw = Raw(fif_fname, preload=True)
drop_ch = raw.ch_names[:3]
ch_names = raw.ch_names[3:]
ch_names_orig = raw.ch_names
dummy = raw.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.drop_channels(drop_ch)
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
@testing.requires_testing_data
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
# preload is True
raw = Raw(fif_fname, preload=True)
ch_names = raw.ch_names[:3]
ch_names_orig = raw.ch_names
dummy = raw.pick_channels(ch_names, copy=True) # copy is True
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, raw.ch_names)
assert_equal(len(ch_names_orig), raw._data.shape[0])
raw.pick_channels(ch_names, copy=False) # copy is False
assert_equal(ch_names, raw.ch_names)
assert_equal(len(ch_names), len(raw._cals))
assert_equal(len(ch_names), raw._data.shape[0])
raw = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, raw.pick_channels, ch_names)
assert_raises(RuntimeError, raw.drop_channels, ch_names)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels
"""
raw1 = Raw(fif_fname, preload=True)
raw2 = raw1.copy()
ch_names = raw1.ch_names[2:]
raw1.drop_channels(raw1.ch_names[:1])
raw2.drop_channels(raw2.ch_names[1:2])
my_comparison = [raw1, raw2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
run_tests_if_main()
| bsd-3-clause |
HolgerPeters/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
apdavison/elephant | elephant/current_source_density_src/icsd.py | 9 | 35175 | # -*- coding: utf-8 -*-
'''
py-iCSD toolbox!
Translation of the core functionality of the CSDplotter MATLAB package
to python.
The methods were originally developed by Klas H. Pettersen, as described in:
Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute T. Einevoll,
Current-source density estimation based on inversion of electrostatic forward
solution: Effects of finite extent of neuronal activity and conductivity
discontinuities, Journal of Neuroscience Methods, Volume 154, Issues 1-2,
30 June 2006, Pages 116-133, ISSN 0165-0270,
http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
The methods themselves are implemented as callable subclasses of the base
CSD class object, which sets some common attributes,
and a basic function for calculating the iCSD, and a generic spatial filter
implementation.
The raw- and filtered CSD estimates are returned as Quantity arrays.
Requires the pylab environment to work, i.e. numpy+scipy+matplotlib, with the
addition of quantities (http://pythonhosted.org/quantities) and
neo (https://pythonhosted.org/neo).
Original implementation from CSDplotter-0.1.1
(http://software.incf.org/software/csdplotter) by Klas. H. Pettersen 2005.
Written by:
- [email protected], 2010,
- [email protected], 2015-2016
'''
import numpy as np
import scipy.integrate as si
import scipy.signal as ss
import quantities as pq
class CSD(object):
'''Base iCSD class'''
def __init__(self, lfp, f_type='gaussian', f_order=(3, 1)):
'''Initialize parent class iCSD
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps)
f_type : str
type of spatial filter, must be a scipy.signal filter design method
f_order : list
settings for spatial filter, arg passed to filter design function
'''
self.name = 'CSD estimate parent class'
self.lfp = lfp
self.f_matrix = np.eye(lfp.shape[0]) * pq.m**3 / pq.S
self.f_type = f_type
self.f_order = f_order
def get_csd(self, ):
'''
        Perform the CSD estimate from the LFP and forward matrix F, i.e. as
CSD=F**-1*LFP
Arguments
---------
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.linalg.solve(self.f_matrix, self.lfp)
return csd * (self.f_matrix.units**-1 * self.lfp.units).simplified
def filter_csd(self, csd, filterfunction='convolve'):
'''
Spatial filtering of the CSD estimate, using an N-point filter
Arguments
---------
csd : np.ndarrray * quantity.Quantity
Array with the csd estimate
filterfunction : str
'filtfilt' or 'convolve'. Apply spatial filter using
scipy.signal.filtfilt or scipy.signal.convolve.
'''
if self.f_type == 'gaussian':
try:
assert(len(self.f_order) == 2)
            except AssertionError:
                raise AssertionError('filter order f_order must be a tuple '
                                     'of length 2')
else:
try:
assert(self.f_order > 0 and isinstance(self.f_order, int))
            except AssertionError:
                raise AssertionError('Filter order must be int > 0!')
try:
assert(filterfunction in ['filtfilt', 'convolve'])
        except AssertionError:
            raise AssertionError("{} not equal to 'filtfilt' or "
                                 "'convolve'".format(filterfunction))
if self.f_type == 'boxcar':
num = ss.boxcar(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'hamming':
num = ss.hamming(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'triangular':
num = ss.triang(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'gaussian':
num = ss.gaussian(self.f_order[0], self.f_order[1])
denom = np.array([num.sum()])
elif self.f_type == 'identity':
num = np.array([1.])
denom = np.array([1.])
else:
            raise ValueError('%s Wrong filter type!' % self.f_type)
num_string = '[ '
for i in num:
num_string = num_string + '%.3f ' % i
num_string = num_string + ']'
denom_string = '[ '
for i in denom:
denom_string = denom_string + '%.3f ' % i
denom_string = denom_string + ']'
print(('discrete filter coefficients: \nb = {}, \
\na = {}'.format(num_string, denom_string)))
if filterfunction == 'filtfilt':
return ss.filtfilt(num, denom, csd, axis=0) * csd.units
elif filterfunction == 'convolve':
csdf = csd / csd.units
for i in range(csdf.shape[1]):
csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
return csdf * csd.units
class StandardCSD(CSD):
'''
Standard CSD method with and without Vaknin electrodes
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize standard CSD method class with & without Vaknin electrodes.
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
            (# contacts, ) in units of m, must be monotonically increasing
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S/m
vaknin_el : bool
flag for using method of Vaknin to endpoint electrodes
Defaults to True
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
diff_diff_coord = np.diff(np.diff(coord_electrode)).magnitude
zeros_ddc = np.zeros_like(diff_diff_coord)
try:
assert(np.all(np.isclose(diff_diff_coord, zeros_ddc, atol=1e-12)))
except AssertionError as ae:
            print('coord_electrode not evenly spaced')
raise ae
if self.vaknin_el:
# extend lfps array by duplicating potential at endpoint contacts
if lfp.ndim == 1:
self.lfp = np.empty((lfp.shape[0] + 2, )) * lfp.units
else:
self.lfp = np.empty((lfp.shape[0] + 2, lfp.shape[1])) * lfp.units
self.lfp[0, ] = lfp[0, ]
self.lfp[1:-1, ] = lfp
self.lfp[-1, ] = lfp[-1, ]
else:
self.lfp = lfp
self.name = 'Standard CSD method'
self.coord_electrode = coord_electrode
self.f_inv_matrix = self.get_f_inv_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.vaknin_el = kwargs.pop('vaknin_el', True)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_inv_matrix(self):
'''Calculate the inverse F-matrix for the standard CSD method'''
h_val = abs(np.diff(self.coord_electrode)[0])
f_inv = -np.eye(self.lfp.shape[0])
# Inner matrix elements is just the discrete laplacian coefficients
for j in range(1, f_inv.shape[0] - 1):
f_inv[j, j - 1: j + 2] = np.array([1., -2., 1.])
return f_inv * -self.sigma / h_val
def get_csd(self):
'''
        Perform the iCSD calculation, i.e.: iCSD=F_inv*LFP
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.dot(self.f_inv_matrix, self.lfp)[1:-1, ]
# `np.dot()` does not return correct units, so the units of `csd` must
# be assigned manually
csd_units = (self.f_inv_matrix.units * self.lfp.units).simplified
csd = csd.magnitude * csd_units
return csd
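# Illustrative usage sketch for StandardCSD (not part of the original module),
# kept as comments so that importing this file stays side-effect free. The LFP
# array and contact coordinates are synthetic; parameter names follow the
# docstring above.
#
# z_data = np.arange(23) * 100E-6 * pq.m                  # evenly spaced contacts
# lfp_data = np.random.rand(23, 100) * 1E-3 * pq.V        # (# channels, # samples)
# std_csd = StandardCSD(lfp_data, z_data, sigma=0.3 * pq.S / pq.m,
#                       vaknin_el=True, f_type='gaussian', f_order=(3, 1))
# csd_raw = std_csd.get_csd()                             # CSD estimate, shape (23, 100)
# csd_smoothed = std_csd.filter_csd(csd_raw)              # spatially filtered estimate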
class DeltaiCSD(CSD):
'''
delta-iCSD method
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize the delta-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
            diameter of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
            print('values of coord_electrode not monotonically increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'delta-iCSD method'
self.coord_electrode = coord_electrode
# initialize F- and iCSD-matrices
self.f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size))
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix'''
f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size)) * self.coord_electrode.units
for j in range(self.coord_electrode.size):
for i in range(self.coord_electrode.size):
f_matrix[j, i] = ((np.sqrt((self.coord_electrode[j] -
self.coord_electrode[i])**2 +
(self.diam[j] / 2)**2) - abs(self.coord_electrode[j] -
self.coord_electrode[i])) +
(self.sigma - self.sigma_top) / (self.sigma +
self.sigma_top) *
(np.sqrt((self.coord_electrode[j] +
self.coord_electrode[i])**2 + (self.diam[j] / 2)**2)-
abs(self.coord_electrode[j] + self.coord_electrode[i])))
f_matrix /= (2 * self.sigma)
return f_matrix
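# For reference, the forward-solution element assembled above can be written
# (directly transcribing the code, with z_j = coord_electrode[j], d_j = diam[j]):
#
#   F[j, i] = 1 / (2 * sigma) * ( sqrt((z_j - z_i)**2 + (d_j / 2)**2) - |z_j - z_i|
#             + (sigma - sigma_top) / (sigma + sigma_top)
#               * ( sqrt((z_j + z_i)**2 + (d_j / 2)**2) - |z_j + z_i| ) )
#
# i.e. the potential at contact j from a planar disc source of diameter d_j at
# contact i, plus a method-of-images term for the tissue/top-medium boundary.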
class StepiCSD(CSD):
'''step-iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing step-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float or np.ndarray * quantity.Quantity
diameter(s) of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
h : float or np.ndarray * quantity.Quantity
assumed thickness of the source cylinders at all or each contact
Defaults to np.ones(15) * 100E-6 * pq.m
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
            print('values of coord_electrode not monotonically increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
try:
assert(self.h.size == 1 or self.h.size == coord_electrode.size)
if self.h.size == coord_electrode.size:
assert(np.all(self.h > 0 * self.h.units))
except AssertionError as ae:
print('h must be scalar or of same shape as coord_electrode')
raise ae
if self.h.size == 1:
self.h = np.ones(coord_electrode.size) * self.h
self.name = 'step-iCSD method'
self.coord_electrode = coord_electrode
# compute forward-solution matrix
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.h = kwargs.pop('h', np.ones(23) * 100E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate F-matrix for step iCSD method'''
el_len = self.coord_electrode.size
f_matrix = np.zeros((el_len, el_len))
for j in range(el_len):
for i in range(el_len):
lower_int = self.coord_electrode[i] - self.h[j] / 2
                if lower_int < 0:
                    # assuming the intent is to clip the lower integration
                    # bound at the surface (z = 0), not set it to 1 * h-units
                    lower_int = 0 * self.h[j].units
upper_int = self.coord_electrode[i] + self.h[j] / 2
# components of f_matrix object
f_cyl0 = si.quad(self._f_cylinder,
a=lower_int, b=upper_int,
args=(float(self.coord_electrode[j]),
float(self.diam[j]),
float(self.sigma)),
epsabs=self.tol)[0]
f_cyl1 = si.quad(self._f_cylinder, a=lower_int, b=upper_int,
args=(-float(self.coord_electrode[j]),
float(self.diam[j]), float(self.sigma)),
epsabs=self.tol)[0]
# method of images coefficient
mom = (self.sigma - self.sigma_top) / (self.sigma + self.sigma_top)
f_matrix[j, i] = f_cyl0 + mom * f_cyl1
# assume si.quad trash the units
return f_matrix * self.h.units**2 / self.sigma.units
def _f_cylinder(self, zeta, z_val, diam, sigma):
'''function used by class method'''
f_cyl = 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
return f_cyl
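# For reference, the step-iCSD forward matrix assembled in get_f_matrix() above
# is (directly transcribing the code, with z_i = coord_electrode[i],
# d_j = diam[j], h_j = h[j]):
#
#   F[j, i] = integral from z_i - h_j/2 to z_i + h_j/2 of
#                 f_cyl(zeta; z_j, d_j, sigma) dzeta
#             + (sigma - sigma_top) / (sigma + sigma_top) *
#               integral over the same bounds of f_cyl(zeta; -z_j, d_j, sigma) dzeta
#
# where f_cyl is the cylinder kernel implemented in _f_cylinder(), i.e. each
# source is a constant-density cylinder of thickness h and diameter d centered
# on contact i, with a method-of-images correction for the top boundary.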
class SplineiCSD(CSD):
'''spline iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing spline-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
            diameter of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
num_steps : int
number of data points for the spatially upsampled LFP/CSD data
Defaults to 200
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
            print('values of coord_electrode not monotonically increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
except AssertionError as ae:
print('diam must be scalar or of same shape as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'spline-iCSD method'
self.coord_electrode = coord_electrode
# compute stuff
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.num_steps = kwargs.pop('num_steps', 200)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix for cubic spline iCSD method'''
el_len = self.coord_electrode.size
z_js = np.zeros(el_len + 1)
z_js[:-1] = np.array(self.coord_electrode)
z_js[-1] = z_js[-2] + float(np.diff(self.coord_electrode).mean())
# Define integration matrixes
f_mat0 = np.zeros((el_len, el_len + 1))
f_mat1 = np.zeros((el_len, el_len + 1))
f_mat2 = np.zeros((el_len, el_len + 1))
f_mat3 = np.zeros((el_len, el_len + 1))
# Calc. elements
for j in range(el_len):
for i in range(el_len):
f_mat0[j, i] = si.quad(self._f_mat0, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat1[j, i] = si.quad(self._f_mat1, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat2[j, i] = si.quad(self._f_mat2, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat3[j, i] = si.quad(self._f_mat3, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
# image technique if conductivity not constant:
if self.sigma != self.sigma_top:
f_mat0[j, i] = f_mat0[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat0, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1],
float(self.sigma), float(self.diam[j])), \
epsabs=self.tol)[0]
f_mat1[j, i] = f_mat1[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat1, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat2[j, i] = f_mat2[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat2, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat3[j, i] = f_mat3[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat3, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
e_mat0, e_mat1, e_mat2, e_mat3 = self._calc_e_matrices()
# Calculate the F-matrix
f_matrix = np.eye(el_len + 2)
f_matrix[1:-1, :] = np.dot(f_mat0, e_mat0) + \
np.dot(f_mat1, e_mat1) + \
np.dot(f_mat2, e_mat2) + \
np.dot(f_mat3, e_mat3)
return f_matrix * self.coord_electrode.units**2 / self.sigma.units
def get_csd(self):
'''
Calculate the iCSD using the spline iCSD method
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with csd estimate
'''
e_mat = self._calc_e_matrices()
el_len = self.coord_electrode.size
# padding the lfp with zeros on top/bottom
if self.lfp.ndim == 1:
cs_lfp = np.r_[[0], np.asarray(self.lfp), [0]].reshape(1, -1).T
csd = np.zeros(self.num_steps)
else:
cs_lfp = np.vstack((np.zeros(self.lfp.shape[1]),
np.asarray(self.lfp),
np.zeros(self.lfp.shape[1])))
csd = np.zeros((self.num_steps, self.lfp.shape[1]))
cs_lfp *= self.lfp.units
# CSD coefficients
csd_coeff = np.linalg.solve(self.f_matrix, cs_lfp)
# The cubic spline polynomial coefficients
a_mat0 = np.dot(e_mat[0], csd_coeff)
a_mat1 = np.dot(e_mat[1], csd_coeff)
a_mat2 = np.dot(e_mat[2], csd_coeff)
a_mat3 = np.dot(e_mat[3], csd_coeff)
        # Extend electrode coordinates at both ends by the minimum contact interdistance
h = np.diff(self.coord_electrode).min()
z_js = np.zeros(el_len + 2)
z_js[0] = self.coord_electrode[0] - h
z_js[1: -1] = self.coord_electrode
z_js[-1] = self.coord_electrode[-1] + h
# create high res spatial grid
out_zs = np.linspace(z_js[1], z_js[-2], self.num_steps)
# Calculate iCSD estimate on grid from polynomial coefficients.
i = 0
for j in range(self.num_steps):
if out_zs[j] >= z_js[i + 1]:
i += 1
csd[j, ] = a_mat0[i, :] + a_mat1[i, :] * \
(out_zs[j] - z_js[i]) + \
a_mat2[i, :] * (out_zs[j] - z_js[i])**2 + \
a_mat3[i, :] * (out_zs[j] - z_js[i])**3
csd_unit = (self.f_matrix.units**-1 * self.lfp.units).simplified
return csd * csd_unit
def _f_mat0(self, zeta, z_val, sigma, diam):
        '''0th order potential function'''
return 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
def _f_mat1(self, zeta, z_val, zi_val, sigma, diam):
        '''1st order potential function'''
return (zeta - zi_val) * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat2(self, zeta, z_val, zi_val, sigma, diam):
        '''2nd order potential function'''
return (zeta - zi_val)**2 * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat3(self, zeta, z_val, zi_val, sigma, diam):
        '''3rd order potential function'''
return (zeta - zi_val)**3 * self._f_mat0(zeta, z_val, sigma, diam)
def _calc_k_matrix(self):
        '''Calculate the K-matrix used to calculate the E-matrices'''
el_len = self.coord_electrode.size
h = float(np.diff(self.coord_electrode).min())
c_jm1 = np.eye(el_len + 2, k=0) / h
c_jm1[0, 0] = 0
c_j0 = np.eye(el_len + 2) / h
c_j0[-1, -1] = 0
c_jall = c_j0
c_jall[0, 0] = 1
c_jall[-1, -1] = 1
tjp1 = np.eye(el_len + 2, k=1)
tjm1 = np.eye(el_len + 2, k=-1)
tj0 = np.eye(el_len + 2)
tj0[0, 0] = 0
tj0[-1, -1] = 0
# Defining K-matrix used to calculate e_mat1-3
return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +
2 * np.dot(c_jm1, tj0) +
2 * c_jall +
np.dot(c_j0, tjp1)),
3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -
np.dot(np.dot(c_jm1, c_jm1), tjm1) +
np.dot(np.dot(c_j0, c_j0), tjp1) -
np.dot(np.dot(c_j0, c_j0), tj0)))
def _calc_e_matrices(self):
'''Calculate the E-matrices used by cubic spline iCSD method'''
el_len = self.coord_electrode.size
# expanding electrode grid
h = float(np.diff(self.coord_electrode).min())
# Define transformation matrices
c_mat3 = np.eye(el_len + 1) / h
# Get K-matrix
k_matrix = self._calc_k_matrix()
        # Define matrices for C to A transformation:
tja = np.eye(el_len + 2)[:-1, ]
tjp1a = np.eye(el_len + 2, k=1)[:-1, ]
# Define spline coefficients
e_mat0 = tja
e_mat1 = np.dot(tja, k_matrix)
e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \
np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)
e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \
np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)
return e_mat0, e_mat1, e_mat2, e_mat3
if __name__ == '__main__':
from scipy.io import loadmat
import matplotlib.pyplot as plt
#loading test data
test_data = loadmat('test_data.mat')
    #prepare lfp data for use, by changing the units to SI and appending quantities,
#along with electrode geometry, conductivities and assumed source geometry
lfp_data = test_data['pot1'] * 1E-6 * pq.V # [uV] -> [V]
z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m # [m]
diam = 500E-6 * pq.m # [m]
h = 100E-6 * pq.m # [m]
sigma = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
sigma_top = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
# Input dictionaries for each method
delta_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
step_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'h' : h, # source thickness
'sigma' : sigma,
'sigma_top' : sigma,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : 201, # Spatial CSD upsampling to N steps
'tol' : 1E-12,
'f_type' : 'gaussian',
'f_order' : (20, 5),
}
std_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
#Create the different CSD-method class instances. We use the class methods
#get_csd() and filter_csd() below to get the raw and spatially filtered
#versions of the current-source density estimates.
csd_dict = dict(
delta_icsd = DeltaiCSD(**delta_input),
step_icsd = StepiCSD(**step_input),
spline_icsd = SplineiCSD(**spline_input),
std_csd = StandardCSD(**std_input),
)
#plot
for method, csd_obj in list(csd_dict.items()):
fig, axes = plt.subplots(3,1, figsize=(8,8))
#plot LFP signal
ax = axes[0]
im = ax.imshow(np.array(lfp_data), origin='upper', vmin=-abs(lfp_data).max(), \
vmax=abs(lfp_data).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
cb = plt.colorbar(im, ax=ax)
cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
ax.set_xticklabels([])
ax.set_title('LFP')
ax.set_ylabel('ch #')
#plot raw csd estimate
csd = csd_obj.get_csd()
ax = axes[1]
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name)
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_xticklabels([])
ax.set_ylabel('ch #')
#plot spatially filtered csd estimate
ax = axes[2]
csd = csd_obj.filter_csd(csd)
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name + ', filtered')
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_ylabel('ch #')
ax.set_xlabel('timestep')
plt.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/make.py | 1 | 7453 | #!/usr/bin/env python
from __future__ import print_function
import fileinput
import glob
import os
import shutil
import sys
### Begin compatibility block for pre-v2.6: ###
#
# ignore_patterns and copytree functions are copies of what is included
# in shutil.copytree of python v2.6 and later.
#
### When compatibility is no-longer needed, this block
### can be replaced with:
###
### from shutil import ignore_patterns, copytree
###
### or the "shutil." qualifier can be prepended to the function
### names where they are used.
try:
WindowsError
except NameError:
WindowsError = None
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
import fnmatch
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
from shutil import copy2, Error, copystat
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.extend((src, dst, str(why)))
if errors:
raise Error, errors
### End compatibility block for pre-v2.6 ###
def copy_if_out_of_date(original, derived):
if (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime):
shutil.copyfile(original, derived)
def check_build():
build_dirs = ['build', 'build/doctrees', 'build/html', 'build/latex',
'build/texinfo', '_static', '_templates']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def doctest():
os.system('sphinx-build -b doctest -d build/doctrees . build/doctest')
def linkcheck():
os.system('sphinx-build -b linkcheck -d build/doctrees . build/linkcheck')
def html():
check_build()
copy_if_out_of_date('../lib/matplotlib/mpl-data/matplotlibrc', '_static/matplotlibrc')
if small_docs:
options = "-D plot_formats=\"[('png', 80)]\""
else:
options = ''
if os.system('sphinx-build %s -b html -d build/doctrees . build/html' % options):
raise SystemExit("Building HTML failed.")
figures_dest_path = 'build/html/pyplots'
if os.path.exists(figures_dest_path):
shutil.rmtree(figures_dest_path)
copytree(
'pyplots', figures_dest_path,
ignore=ignore_patterns("*.pyc"))
# Clean out PDF files from the _images directory
for filename in glob.glob('build/html/_images/*.pdf'):
os.remove(filename)
shutil.copy('../CHANGELOG', 'build/html/_static/CHANGELOG')
def latex():
check_build()
#figs()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -b latex -d build/doctrees . build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering LaTeX failed.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def texinfo():
check_build()
#figs()
if sys.platform != 'win32':
# Texinfo format.
if os.system(
'sphinx-build -b texinfo -d build/doctrees . build/texinfo'):
raise SystemExit("Building Texinfo failed.")
# Produce info file.
os.chdir('build/texinfo')
# Call the makefile produced by sphinx...
if os.system('make'):
raise SystemExit("Rendering Texinfo failed.")
os.chdir('../..')
else:
print('texinfo build has not been tested on windows')
def clean():
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("examples", ignore_errors=True)
for pattern in ['mpl_examples/api/*.png',
'mpl_examples/pylab_examples/*.png',
'mpl_examples/pylab_examples/*.pdf',
'mpl_examples/units/*.png',
'pyplots/tex_demo.png',
'_static/matplotlibrc',
'_templates/gallery.html',
'users/installing.rst']:
for filename in glob.glob(pattern):
if os.path.exists(filename):
os.remove(filename)
def all():
#figs()
html()
latex()
funcd = {
'html' : html,
'latex' : latex,
'texinfo' : texinfo,
'clean' : clean,
'all' : all,
'doctest' : doctest,
'linkcheck': linkcheck,
}
small_docs = False
# Change directory to the one containing this file
current_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
copy_if_out_of_date('../INSTALL', 'users/installing.rst')
if len(sys.argv)>1:
if '--small' in sys.argv[1:]:
small_docs = True
sys.argv.remove('--small')
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s'%(
arg, funcd.keys()))
func()
else:
small_docs = False
all()
os.chdir(current_dir)
| mit |
jasonleaster/Machine_Learning | SAMME/tester.py | 1 | 1351 | """
Programmer : EOF
Date : 2015.11.22
File : tester.py
File Description:
This file is used to test the adaboost which is a classical
automatic classifier.
"""
import numpy
import matplotlib.pyplot as pyplot
from samme import SAMME
Original_Data = numpy.array([
['teenager', 'no', 'no', 0.0],
['teenager', 'no', 'no', 1.0],
['teenager', 'yes', 'no', 1.0],
['teenager', 'yes', 'yes', 0.0],
['teenager', 'no', 'no', 0.0],
['senior citizen', 'no', 'no', 0.0],
['senior citizen', 'no', 'no', 1.0],
['senior citizen', 'yes', 'yes', 1.0],
['senior citizen', 'no', 'yes', 2.0],
['senior citizen', 'no', 'yes', 2.0],
    ['old people', 'no', 'yes', 2.0],
    ['old people', 'no', 'yes', 1.0],
    ['old people', 'yes', 'no', 1.0],
    ['old people', 'yes', 'no', 2.0],
    ['old people', 'no', 'no', 0.0],
]).transpose()
Tag = numpy.array([
[-1],
[-1],
[+1],
[+1],
[-1],
[-1],
[-1],
[+1],
[+1],
[+1],
[+1],
[+1],
[+1],
[+1],
[-1],
]).transpose()
Tag = Tag.flatten()
discrete = [ True for i in range(Original_Data.shape[0])]
a = SAMME(Original_Data, Tag, discrete)
a.train()
print a.prediction(a._Mat)
| gpl-2.0 |
rolando/theusual-kaggle-seeclickfix-ensemble | Bryan/ensembles.py | 2 | 19613 | """
Classes and functions for working with base models and ensembles.
"""
__author__ = 'bgregory'
__email__ = '[email protected]'
__date__ = '11-23-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
import ml_metrics
import data_io
import features
import train
#External modules
import numpy as np
import pandas as pd
from sklearn import (metrics, cross_validation, linear_model, ensemble, tree, preprocessing, svm, neighbors, gaussian_process, naive_bayes, neural_network, pipeline, lda)
########################################################################################
class Model(object):
"""Base class for all models: stand-alone independent models, base models,
and ensemble models.
Parameters
----------
model_name: string, required
Descriptive name of model for use in logging and output
estimator_class: string, required
SKLearn estimator class for model fitting and training
features: dictionary, required
Features of the model. Key is feature name, value is a dictionary with
'train' and 'test' arrays.
Ex.- model.features['foo_feature']['train'] will return an
array with the values in training set for foo_feature
target: string, optional (default = global_target from settings file)
Target variable (column name) for this model
segment: string, optional (default = none)
Segment of data for this model to use
estimator_params: dictionary, optional (default=none, which passes to SKLearn defaults for that estimator)
Parameters of the estimator class
postprocess_scalar: float, optional (default=0)
Scalar to apply to all predictions after model predicting, useful for calibrating predictions
Attributes
----------
"""
def __init__(self, model_name, target, segment, estimator_class, estimator_params, features, postprocess_scalar):
self.model_name = model_name
self.target = target
self.segment = segment
self.estimator_class = estimator_class
self.estimator_set(estimator_class, estimator_params)
self.features_set(features)
self.postprocess_scalar = round(np.float32(postprocess_scalar), 4)
def estimator_set(self, estimator_class, estimator_params):
self.estimator = eval(estimator_class)()
for param in estimator_params:
#Convert any boolean parameters from string to bool
if estimator_params[param] == 'true':
estimator_params[param] = True
elif estimator_params[param] == 'false':
estimator_params[param] = False
#Convert any numerical parameters from the required JSON string type
elif '.' in estimator_params[param]:
try:
estimator_params[param] = float(estimator_params[param])
except:
pass
else:
try:
estimator_params[param] = int(estimator_params[param])
except:
pass
setattr(self.estimator, param, estimator_params[param])
def features_set(self, features):
"""Initialize dictionary of features where keys are the feature names and values are an empty
list for storing the training and testing array/matrix"""
self.features = dict((feature,['','']) for feature in features)
def features_create(self,dfTrn,dfTest):
#Vectorize each text, categorical, or boolean feature into a train and test matrix stored in self.features
features.vectors(dfTrn, dfTest, self.features)
#Transform or scale any numerical features and create feature vector
features.numerical(dfTrn, dfTest, self.features)
def predict(self,dfTrn,dfTest):
#Create feature vectors
self.features_create(dfTrn,dfTest)
#Make predictions
mtxTrn, mtxTest, mtxTrnTarget, mtxTestTarget = train.combine_features(self, dfTrn, dfTest)
train.predict(mtxTrn,mtxTrnTarget.ravel(),mtxTest,dfTest,self)
#Store predictions in dataframe as class attribute
self.dfPredictions = dfTest.ix[:,['id',self.target]]
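# Illustrative usage sketch (not from the original pipeline): the model name,
# feature names, estimator class string, parameter values, and the dfTrn/dfTest
# dataframes below are placeholders chosen for the example; the call pattern
# simply follows Model.predict() above.
#
#   views_model = Model(model_name='views_gbm',
#                       target='num_views',
#                       segment='Richmond',
#                       estimator_class='ensemble.GradientBoostingRegressor',
#                       estimator_params={'n_estimators': '200', 'max_depth': '4'},
#                       features=['description', 'latitude', 'longitude'],
#                       postprocess_scalar=1.0)
#   views_model.predict(dfTrn, dfTest)        # builds features, fits, predicts
#   print(views_model.dfPredictions.head())   # id + predicted num_views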
########################################################################################
class EnsembleAvg (object):
"""Loads already calculated predictions from individual models in the form of CSV files, then applies
average weights to each individual model to create an ensemble model.
If predictions are for a cross-validation, then true target values can be loaded and the ensemble can be scored
using given weights or using optimally derived weights.
Attributes:
df_models = List containing each individual model's predictions
id = unique ID for each record
targets = List containing the target (or targets) for the predictions
df_true = Pandas DataFrame containing the true values for the predictions, only required if performing CV
"""
def __init__(self, targets, id):
self.sub_models = []
self.sub_models_segment = []
self.targets = targets
self.id = id
def load_models_csv(self,filepath, model_no = None):
"""
Load predictions from an individual sub model into a dataframe stored in the sub_models list, if no model_no is given
then load data into next available index. Valid source is a CSV file.
"""
try:
if model_no == None:
model_no = len(self.sub_models)
self.sub_models.append(data_io.load_flatfile_to_df(filepath, delimiter=''))
else:
self.sub_models[model_no]=data_io.load_flatfile_to_df(filepath, delimiter='')
utils.info('Model loaded into index %s' % str(model_no))
except IndexError:
raise Exception('Model number does not exist. Model number given, %s, is out of index range.' % str(model_no))
def load_true_df(self,df):
"""
Load true target values (ground truth) into a dataframe attribute from an in-memory dataframe object.
"""
if type(df) != pd.core.frame.DataFrame:
raise Exception('Object passed, %s, is not a Dataframe. Object passed is of type %s' % (df, type(df)))
elif self.id not in df.columns:
raise Exception('Dataframe passed, %s, does not contain unique ID field: %s' % (df, self.id))
elif not all(x in df.columns for x in self.targets):
raise Exception('Dataframe passed, %s, does not contain all target variables: %s' % (df, self.targets))
else:
self.df_true = df.copy()
utils.info('True value for target variables successfully loaded into self.df_true')
def load_df_true_segment(self,df):
"""
For segmented data.
Load true target values (ground truth) into a dataframe attribute from an in-memory dataframe object.
"""
if type(df) != pd.core.frame.DataFrame:
raise Exception('Object passed, %s, is not a Dataframe. Object passed is of type %s' % (df, type(df)))
elif self.id not in df.columns:
raise Exception('Dataframe passed, %s, does not contain unique ID field: %s' % (df, self.id))
elif not all(x in df.columns for x in self.targets):
raise Exception('Dataframe passed, %s, does not contain all target variables: %s' % (df, self.targets))
else:
self.df_true_segment = df.copy()
utils.info('True value for target variables successfully loaded into self.df_true_segment')
def sort_dataframes(self,sortcolumn):
"""
Sort all data frame attributes of class by a given column for ease of comparison.
"""
try:
for i in range(len(self.sub_models)):
self.sub_models[i] = self.sub_models[i].sort(sortcolumn)
if 'df_true' in dir(self):
self.df_true = self.df_true.sort(sortcolumn)
if 'df_true_segment' in dir(self):
self.df_true_segment = self.df_true_segment.sort(sortcolumn)
except KeyError:
raise Exception('Sort failed. Column %s not found in all dataframes.' % (sortcolumn))
def transform_targets_log(self):
"""
Apply natural log transformation to all targets (both predictions and true values)
"""
for target in self.targets:
if 'df_true' in dir(self):
self.df_true[target] = np.log(self.df_true[target] + 1)
if 'df_true_segment' in dir(self):
self.df_true_segment[target] = np.log(self.df_true_segment[target] + 1)
for i in range(len(self.sub_models)):
self.sub_models[i][target] = np.log(self.sub_models[i][target] + 1)
for i in range(len(self.sub_models_segment)):
self.sub_models_segment[i][target] = np.log(self.sub_models_segment[i][target] + 1)
def transform_targets_exp(self):
"""
Apply exp transformation (inverse of natural log transformation) to all targets (both predictions and true values)
"""
for target in self.targets:
if 'df_true' in dir(self):
self.df_true[target] = np.exp(self.df_true[target])-1
if 'df_true_segment' in dir(self):
self.df_true_segment[target] = np.exp(self.df_true_segment[target])-1
if 'df_ensemble' in dir(self):
self.df_ensemble[target] = np.exp(self.df_ensemble[target])-1
if 'df_ensemble_segment' in dir(self):
self.df_ensemble_segment[target] = np.exp(self.df_ensemble_segment[target])-1
for i in range(len(self.sub_models)):
self.sub_models[i][target] = np.exp(self.sub_models[i][target])-1
for i in range(len(self.sub_models_segment)):
self.sub_models_segment[i][target] = np.exp(self.sub_models_segment[i][target]) -1
def score_rmsle(self,df,df_true):
"""
Calculate CV score of predictions in given dataframe using RMSLE metric. Score individually for each target and
total for targets. Must have df_true loaded prior to running.
"""
all_true = []
all_preds = []
target_scores = []
#Transform predictions back to normal space for scoring
self.transform_targets_exp()
for target in self.targets:
all_true.append(df_true[target].tolist())
all_preds.append(df[target].tolist())
target_score = ml_metrics.rmsle(df_true[target], df[target])
target_scores.append(target_score)
utils.info('RMSLE score for %s: %f' % (target,target_score))
utils.info('Total RMSLE score: %f' % (ml_metrics.rmsle(all_true, all_preds)))
#Transform predictions to log space again for averaging
self.transform_targets_log()
def create_ensemble(self,sub_model_indexes, weights):
"""
Create ensemble from the given sub models using average weights.
Sub_model_indexes is a list of indexes to use for the sub_models list.
Weights is a list of dictionaries with given averages for each target, its ordering must correspond to
the order of sub_model_indexes.
Ex. - >>> weights = [{'target1':.5,'target2':.5},{'target1':.25,'target2':.75}]
"""
if len(sub_model_indexes) != len(weights):
            raise Exception('Ensemble failed. Number of sub models, %d, is not equal to number of weights, %d.'
                            % (len(sub_model_indexes), len(weights)))
else:
#Create new data frame ensemble
self.df_ensemble = self.sub_models[0].copy()
for target in self.targets:
self.df_ensemble[target] = 0
                for submodel in sub_model_indexes:
                    self.df_ensemble[target] += self.sub_models[submodel][target] * weights[submodel][target]
def create_ensemble_segment(self,sub_model_indexes, weights):
"""
Create ensemble for a certain segment, from the given sub models using average weights.
Sub_model_indexes is a list of indexes to use for the sub_models list.
Weights is a list of dictionaries with given averages for each target, its ordering must correspond to
the order of sub_model_indexes.
Ex. - >>> weights = [{'target1':.5,'target2':.5},{'target1':.25,'target2':.75}]
"""
if len(sub_model_indexes) != len(weights):
            raise Exception('Ensemble failed. Number of sub models, %d, is not equal to number of weights, %d.'
                            % (len(sub_model_indexes), len(weights)))
else:
#Create new data frame ensemble
self.df_ensemble_segment = self.sub_models_segment[0].copy()
for target in self.targets:
self.df_ensemble_segment[target] = 0
for submodel in sub_model_indexes:
self.df_ensemble_segment[target] += self.sub_models_segment[submodel][target] * weights[submodel][target]
def calc_weights(self,sub_model_indexes, step_size):
"""
Calculate optimal weights to use in averaged ensemble using the given sub-models and given score metric
"""
        raise NotImplementedError('Automatic weight search is not implemented; '
                                  'derive the weights externally (e.g. with a grid '
                                  'search or regression) and pass them to create_ensemble().')
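#-------------------------------------------------------------------------
# Illustrative sketch only (not part of the original pipeline): one way the
# per-target blend weights for two sub-models could be derived by brute-force
# grid search, scored with RMSLE as in EnsembleAvg.score_rmsle. It assumes the
# prediction frames and df_true are in normal (non-log) space, share the same
# row order, and contain the listed target columns; it reuses the module-level
# numpy and ml_metrics imports.
def grid_search_two_model_weights(preds_a, preds_b, df_true, targets, step_size=0.05):
    """Return {target: weight for preds_a}; preds_b implicitly gets (1 - weight)."""
    best_weights = {}
    for target in targets:
        best_score, best_w = None, None
        for w in np.arange(0.0, 1.0 + 1e-9, step_size):
            blend = w * preds_a[target].values + (1.0 - w) * preds_b[target].values
            score = ml_metrics.rmsle(df_true[target].values, blend)
            if best_score is None or score < best_score:
                best_score, best_w = score, w
        best_weights[target] = best_w
    return best_weights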
###############################################################################################################
"""
#---Ensemble Averaging----#
reload(ensembles);ensemble_CV = ensembles.EnsembleAvg(targets=targets,id='id')
ensemble_CV.load_models_csv(filepath='Submits/BryanModel-Updated-CV.csv')
ensemble_CV.load_models_csv(filepath='Submits/ridge_38_cv.csv')
ensemble_CV.load_models_csv(filepath='Submits/weak_geo_cv.csv')
#Parse segments
ensemble_CV.sub_models_segment.append\
(ensemble_CV.sub_models[0][ensemble_CV.sub_models[0]['Segment'] == 'Richmond'].reset_index())
ensemble_CV.sub_models_segment.append\
(ensemble_CV.sub_models[1][ensemble_CV.sub_models[1]['Segment'] == 'Richmond'].reset_index())
ensemble_CV.sub_models_segment.append\
(ensemble_CV.sub_models[2][ensemble_CV.sub_models[2]['Segment'] == 'Richmond'].reset_index())
dfSegTestCV = dfTestCV.merge(ensemble_CV.sub_models_segment[0].ix[:,['id']],on='id',how='inner')
#set targets
ensemble_CV.targets=['num_views']
#Transform CV targets back to normal
for target in ensemble_CV.targets:
dfSegTestCV[target]=np.exp(dfSegTestCV[target])-1
#Load groundtruth values for CV
ensemble_CV.load_df_true_segment(dfSegTestCV)
#Sort all dataframes by ID for easy comparison
ensemble_CV.sort_dataframes('id')
#Transform predictions to log space for averaging
ensemble_CV.transform_targets_log()
#Set weights
#Remote_API: weights = [{'num_views':.16,'num_votes':.3,'num_comments':.9},{'num_views':.84,'num_votes':.7,'num_comments':.1}]
#Richmond: weights = [{'num_views':.7,'num_votes':.45,'num_comments':.7},{'num_views':.3,'num_votes':.55,'num_comments':.3},{'num_views':.4}]
#Oakland weights = [{'num_views':.2,'num_votes':.1,'num_comments':.7},{'num_views':.8,'num_votes':.9,'num_comments':.3}]
weights = [{'num_views':.2,'num_votes':.1,'num_comments':.6},{'num_views':.8,'num_votes':.9,'num_comments':.4}]
#Create ensemble average
#ensemble_CV.create_ensemble([0,1],weights)
ensemble_CV.create_ensemble_segment([0,1,2],weights)
#Score the ensemble
#ensemble_CV.score_rmsle(ensemble_CV.sub_models_segment[0], df_true=ensemble_CV.df_true_segment)
ensemble_CV.score_rmsle(ensemble_CV.df_ensemble_segment, df_true=ensemble_CV.df_true_segment)
#---Use regressor to find ideal weights for ensemble---#
for target_label in ensemble_CV.targets:
clf.fit_intercept=False
train = np.hstack((ensemble_CV.sub_models_segment[0].ix[:,[target_label]].as_matrix(),
ensemble_CV.sub_models_segment[1].ix[:,[target_label]].as_matrix(),
ensemble_CV.sub_models_segment[2].ix[:,[target_label]].as_matrix()))
target = ensemble_CV.df_true_segment.ix[:,[target_label]].as_matrix()
clf.fit(train,target)
try:
for i in range(len(ensemble_CV.sub_models_segment)):
weights[i][target_label]=clf.coef_[i]
except:
for i in range(len(ensemble_CV.sub_models_segment)):
weights[i][target_label]=clf.coef_[0][i]
utils.info(clf.coef_)
#-----------Test Ensemble--------#
reload(ensembles);ensemble_test = ensembles.EnsembleAvg(targets=['num_views'],id='id')
ensemble_test.load_models_csv(filepath='Submits/BryanModel-Updated.csv')
ensemble_test.load_models_csv(filepath='Submits/ridge_38_test.csv')
ensemble_test.load_models_csv(filepath='Submits/weak_geo_svr_.75.csv')
#Parse segments
ensemble_test.sub_models_segment.append\
(ensemble_test.sub_models[0][ensemble_CV.sub_models[0]['Segment'] == 'Richmond'].reset_index())
ensemble_test.sub_models_segment.append\
(ensemble_test.sub_models[1][ensemble_CV.sub_models[1]['Segment'] == 'Richmond'].reset_index())
ensemble_test.sub_models_segment.append\
(ensemble_test.sub_models[2][ensemble_CV.sub_models[2]['Segment'] == 'Richmond'].reset_index())
dfSegTestCV = dfTestCV.merge(ensemble_CV.sub_models_segment[0].ix[:,['id']],on='id',how='inner')
#Transform CV targets back to normal
for target in ensemble_CV.targets:
dfSegTestCV[target]=np.exp(dfSegTestCV[target])-1
#Load groundtruth values for CV
ensemble_CV.load_df_true_segment(dfSegTestCV)
#Sort all dataframes by ID for easy comparison
ensemble_CV.sort_dataframes('id')
#Transform predictions to log space for averaging
ensemble_CV.transform_targets_log()
#Set weights
#Remote_API: weights = [{'num_views':.16,'num_votes':.3,'num_comments':.9},{'num_views':.84,'num_votes':.7,'num_comments':.1}]
#Richmond: weights = [{'num_views':.7,'num_votes':.45,'num_comments':.7},{'num_views':.3,'num_votes':.55,'num_comments':.3},{'num_views':.4}]
#Oakland weights = [{'num_views':.2,'num_votes':.1,'num_comments':.7},{'num_views':.8,'num_votes':.9,'num_comments':.3}]
weights = [{'num_views':.2,'num_votes':.1,'num_comments':.6},{'num_views':.8,'num_votes':.9,'num_comments':.4}]
#Create ensemble average
#ensemble_CV.create_ensemble([0,1],weights)
ensemble_CV.create_ensemble_segment([0,1,2],weights)
#Score the ensemble
#ensemble_CV.score_rmsle(ensemble_CV.sub_models_segment[0], df_true=ensemble_CV.df_true_segment)
ensemble_CV.score_rmsle(ensemble_CV.df_ensemble_segment, df_true=ensemble_CV.df_true_segment)
#Clean off outliers
#Views
dfTrn = dfTrn[dfTrn.num_views_orig < 3]
#dfTest = dfTest[dfTest.num_views_orig < 3]
""" | bsd-3-clause |
dandanvidi/effective-capacity | scripts/gauge.py | 3 | 3523 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 22 13:58:55 2016
@author: dan
"""
import os, sys
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
import numpy as np
from matplotlib.patches import Circle, Wedge, Rectangle
def degree_range(n):
start = np.linspace(0,180,n+1, endpoint=True)[0:-1]
end = np.linspace(0,180,n+1, endpoint=True)[1::]
mid_points = start + ((end-start)/2.)
return np.c_[start, end], mid_points
def rot_text(ang):
rotation = np.degrees(np.radians(ang) * np.pi / np.pi - np.radians(90))
return rotation
def gauge(N=5, labels=None, colors='jet_r',
cat=1, top_title='', title='', fname='./meter.png'):
"""
some sanity checks first
"""
if not labels:
labels = ['']*N
if cat > N:
        raise Exception("\n\nThe category ({}) is greater than the length\nof the labels ({})".format(cat, N))
"""
if colors is a string, we assume it's a matplotlib colormap
and we discretize in N discrete colors
"""
if isinstance(colors, str):
cmap = cm.get_cmap(colors)
cmap = cmap(np.linspace(0,1,N))
colors = cmap[::-1,:].tolist()
if isinstance(colors, list):
if len(colors) == N:
colors = colors[::-1]
else:
            raise Exception("\n\nnumber of colors {} not equal to number of categories {}\n".format(len(colors), N))
"""
begins the plotting
"""
fig, ax = plt.subplots()
ang_range, mid_points = degree_range(N)
labels = labels[::-1]
"""
plots the sectors and the arcs
"""
patches = []
for ang, c in zip(ang_range, colors):
# sectors
# patches.append(Wedge((0.,0.), .4, *ang, facecolor='w', lw=2))
# arcs
patches.append(Wedge((0.,0.), .4, *ang, width=0.10, facecolor=c, lw=1, alpha=1))
[ax.add_patch(p) for p in patches]
"""
set the labels (e.g. 'LOW','MEDIUM',...)
"""
for mid, lab in zip(mid_points, labels):
ax.text(0.35 * np.cos(np.radians(mid)), 0.35 * np.sin(np.radians(mid)), lab, \
horizontalalignment='center', verticalalignment='center', fontsize=25, \
fontweight='bold', rotation = rot_text(mid))
"""
set the bottom banner and the title
"""
r = Rectangle((-0.45,-0.),0.9,0.001, facecolor='w', lw=2)
ax.add_patch(r)
# ax.line()
ax.text(0, -0.08, title, horizontalalignment='center', \
verticalalignment='center', fontsize=22, fontweight='bold')
"""
plots the arrow now
"""
    pos = mid_points[int(np.abs(cat*N - N))]
ax.arrow(0, 0, 0.225 * np.cos(np.radians(pos)), 0.225 * np.sin(np.radians(pos)), \
width=0.02, head_width=0.05, head_length=0.1, fc='k', ec='k')
# ax.plot([0, 0.015], [0.225* np.cos(np.radians(pos)),0.225 * np.sin(np.radians(pos))], c='g', lw=2)
# ax.plot([0, -0.015], [0.225,0], c='k', lw=2)
ax.add_patch(Circle((0, 0), radius=0.02, facecolor='k'))
ax.add_patch(Circle((0, 0), radius=0.01, facecolor='w', zorder=11))
"""
removes frame and ticks, and makes axis equal and tight
"""
ax.set_frame_on(False)
ax.axes.set_xticks([])
ax.axes.set_yticks([])
ax.axis('equal')
ax.set_title(top_title, fontsize=25)
plt.tight_layout()
fig.savefig(fname)
if __name__ == '__main__':
N = 30
gauge(N=N, colors='viridis', cat=0.68,
title=r'', fname='gauge.svg')
| mit |
ezekielsilverstein/JPL | Sloan_List_Script.py | 1 | 15678 | #Numerical Python
import numpy as np
#Pylab Plotting
import pylab
import matplotlib.pyplot as plt
#INTERNET
#Selenium Internet Browsing
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
from decimal import *
import time
import csv
#Internet
import urllib2
print "Start up complete"
#call time upon starting
start_time=time.time()
#open Atlas
#Atlas=webdriver.Firefox()
#Atlas.get('http://isc.astro.cornell.edu/~sloan/library/swsatlas/aot1.html')
#Import Sloan data
Sloan_List=np.genfromtxt('Sloan_List_RA_DEC.txt',delimiter=' ',\
skip_header=1,dtype=[('source',object),('TDT',object),('RA',object),\
('DEC',object),('classification',object)])
for name in Sloan_List.dtype.names:
if name=='source':
source=Sloan_List[name]
if name=='TDT':
TDT=Sloan_List[name]
if name=='RA':
RA=Sloan_List[name]
if name=='DEC':
DEC=Sloan_List[name]
if name=='classification':
classification=Sloan_List[name]
#Make each TDT entry 8 digits
for i in range(len(TDT)):
if len(TDT[i])<8:
TDT[i]='0'+TDT[i]
#remove spaces at end of source names
for i in range(len(source)):
if source[i][-1]==' ':
source[i]=source[i][:-1]
#MAKE DICTIONARIES
#create lists for the meanings of the classifications, subsets, and suffixes
level1_meanings=['naked star','star associated with dust',\
'warm dusty object with little or no stellar contribution',\
'cool dusty object','red spectrum rising to 45um','no continuum',\
'flawed spectrum']
level2_meanings=['carbon-rich dust emission, dominated by SiC at 11.5um',\
'carbon-rich proto planetary nebula',\
'reddened continuum from amorphous carbon',\
'carbon-rich spectrum showing the 21um feature',\
'emission lines are the only significant spectral feature',\
'featureless (Groups 4 and 5)','miscellaneous',\
'naked star, no molecular bands (Group 1 only)',\
'naked star with oxygen-rich molecular bands (Group 1 only)',\
'naked star with carbon-rich molecular bands (Group 1 only)',\
'naked star with emission lines (Group 1 only)',\
'a miscellaneous group of naked stars (Group 1 only)',\
'planetary nebula, many emission lines',\
'as PN, but with UIR features',\
'oxygen-rich dust, 10um silicate absorption',\
'oxygen-rich dust, self-absorbed silicate emission at 10um',\
'crystalline silicate emission, especially at longer wavelengths',\
'oxygen-rich dust emission at 10-12 um',\
'broad low-contrast dust feature from alumina',\
'structured silicate emission',\
'classic narrow silicate emission',\
'crystalline silicate emission at 10-11um and to the red',\
'UIR emission features dominate the spectrum',\
'UIR emission features dominate the spectrum as only significant spectral feature',\
'spectrum peaks 5-8um, drops to red, many are WR stars',\
'silicate/carbon stars',\
'mixture of carbon-rich and crystalline silicate features',\
'mixture of UIR and crystalline silicate features',\
'not applicable']
suffix_meanings=['emission lines','peculiar','UIR features present',\
'uncertain classification','very uncertain classification']
#create lists of the classifications, subsets, and suffixes
classification_subset_names=['level1','level2','suffix']
classification_subsets=[]
level1=['1','2','3','4','5','6','7']
level2=['CE','CN','CR','CT','E','F','M','N','NO','NC','NE','NM',\
'PN','PU','SA','SB','SC','SE','SEa','SEb','SEc','SEC','U','UE','W','C/SE',\
'C/SC','U/SC','N/A']
suffix=['e','p','u',':','::']
classification_subsets.append(level1)
classification_subsets.append(level2)
classification_subsets.append(suffix)
#create dictionaries for the meanings
level1_meanings_dict={}
for i in range(len(level1)):
level1_meanings_dict[level1[i]]=level1_meanings[i]
level2_meanings_dict={}
for i in range(len(level2)):
level2_meanings_dict[level2[i]]=level2_meanings[i]
suffix_meanings_dict={}
for i in range(len(suffix)):
suffix_meanings_dict[suffix[i]]=suffix_meanings[i]
#MAKE MASTER DICTIONARY for classification meanings
Sloan_meanings={}
Sloan_meanings[classification_subset_names[0]]=level1_meanings_dict
Sloan_meanings[classification_subset_names[1]]=level2_meanings_dict
Sloan_meanings[classification_subset_names[2]]=suffix_meanings_dict
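#Example lookups (illustrative only): a classification string such as '2.SEa'
#splits on '.' into a level1 group and a level2 subset, which the dictionaries
#above decode as:
# Sloan_meanings['level1']['2'] --> 'star associated with dust'
# Sloan_meanings['level2']['SEa'] --> 'broad low-contrast dust feature from alumina'
# Sloan_meanings['suffix'][':'] --> 'uncertain classification'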
#Rawlist of objects and their classifications
Sloan_objects_list=[]
for i in range(len(source)):
Sloan_objects_list+=[[source[i]]+[TDT[i]]+[classification[i]]+[RA[i]]+[DEC[i]]]
Sloan_objects=np.array(Sloan_objects_list)
#How to search
#for i in range(len(Sloan_objects)):
# if Sloan_objects[i][0]=='NGC 1386':
# print i
#outputs instance where object name is 'NGC 1386'
######
#Create folders
#execfile('/Users/esilverstein1992/Desktop/Scripts/JPL/color mag plot/Object Spectra/SLOAN LIST/create_folders.py')
#create base folder location
# base='/Users/esilverstein1992/Desktop/Scripts/JPL/color mag plot/Object Spectra/SLOAN LIST/'
#create Master folder
# os.mkdir(base+'MASTER')
os.mkdir('MASTER')
#enter Master folder
# os.chdir(base+'MASTER')
os.chdir('MASTER')
#create level1 folders
# for i in level1:
# os.mkdir(base+'MASTER/'+str(i))
for i in level1:
os.mkdir(str(i))
#alter names of level2 dictionaries with '/' to create folders
for j in range(len(level2)):
for k in range(len(level2[j])):
if level2[j][k]=='/':
level2[j]=level2[j][0:k]+'|'+level2[j][k+1:]
#alter names of 2.SE dictionaries so SEc and SEC aren't the same
for j in range(len(level2)):
if len(level2[j])==3:
if level2[j][2].islower():
level2[j]=level2[j][0]+level2[j][1]+'_'+level2[j][2]
#enter each level1 folder and create all level2 folders
for i in level1:
os.chdir(str(i))
for j in level2:
os.mkdir(j)
os.chdir('..')
#remove suffixes from classification list
for j in range(len(classification)):
while classification[j][-1]=='e' or classification[j][-1]=='p' or \
classification[j][-1]=='u' or classification[j][-1]==':':
if classification[j][-1]==':' and classification[j][-2]==':':
classification[j]=classification[j][0:-2]
elif classification[j][-1]=='e' or classification[j][-1]=='p' or \
classification[j][-1]=='u' or classification[j][-1]==':':
classification[j]=classification[j][0:-1]
#add underscore to 2.SEa,b,c
for j in range(len(classification)):
if classification[j][-1].islower()==True:
classification[j]=classification[j][0:-1]+'_'+classification[j][-1]
#add N|A to the classification if there isn't one
#to be able to place into a folder
for j in range(len(classification)):
if len(classification[j])==1:
classification[j]=classification[j]+'.'+'N|A'
#change '/' to '|' in the source classifications
for j in range(len(classification)):
for k in range(len(classification[j])):
if classification[j][k]=='/':
classification[j]=classification[j][:k]+'|'+classification[j][k+1:]
#create folder for each source
for i in range(len(classification)):
os.mkdir(classification[i][0]+'/'+classification[i][2:]+'/'+source[i]+' '+TDT[i])
print 'Folders Created'
#END RESULT::
#1 MASTER folder
#7 level 1 folders with the MASTER folder
#29 level2 folders within EACH level1 folder
#within the level2 folders are folders for each individual source of the 1239
#each of these source folders contains the name and TDT number
#many level2 folders won't have ANY source folders within
#####
#create list of objects with negative fluxes
neg_flux_number=[]
neg_flux_source=[]
neg_flux_TDT=[]
#Open browser to SWS Atlas
driver=webdriver.Firefox()
driver.get('http://irsa.ipac.caltech.edu/data/SWS/')
#wait for page to load
time.sleep(3)
#Create MASSIVE 'FOR' LOOP for every object in Sloan List
for a in range(1239):
#create wvlen, flux, error lists as a failsafe
wvlen=[]
flux=[]
flux_error=[]
norm_error=[]
#if they remain empty at the end, then skip to the next source
#Search for the object in SWS Atlas
#click on 'Single Object' to input name
object_input=driver.find_element_by_name('locstr')
#clear the box
object_input.clear()
#input RA-DEC of object
object_input.send_keys(RA[a])
object_input.send_keys(', ')
object_input.send_keys(DEC[a])
#limit search size
#radnumber=driver.find_element_by_name('radius')
#radnumber.clear()
#radnumber.send_keys('1')
#select 'arcseconds'
#radunits=driver.find_element_by_xpath("//select/option[3]").click()
#click on 'Submit'
object_input.send_keys(Keys.RETURN)
#EXAMPLE: driver.find_element_by_xpath("//input[@name='username']")
#switch to this window:
driver.switch_to_window(str(driver.window_handles[1]))
#driver.switch_to_window(str(handle))
#wait for page to load
time.sleep(3)
#click to open the source table
#try to click on the source table
#if it can't, wait 20 seconds to load, then try again
try:
time.sleep(5)
element=driver.find_element_by_xpath\
("html/body/div[2]/form/center/center/table/tbody/tr[2]/td/a")
element.click()
except:
time.sleep(10)
element=driver.find_element_by_xpath\
("html/body/div[2]/form/center/center/table/tbody/tr[2]/td/a")
element.click()
#wait for page to load
time.sleep(1)
#switch to this new window
driver.switch_to_window(str(driver.window_handles[2]))
#select the desired IPAC_FORMAT_ASCII_Data Set
#find the desired row number based on name,TDT and 'filenum'
match=False
i=2
while match==False:
xpathfilenum='''//tr['''+str(i)+''']/td[3]'''
objectfilenum=driver.find_element_by_xpath(xpathfilenum)
if str(objectfilenum.text)!=TDT[a]:
i+=1
elif str(objectfilenum.text)==TDT[a]:
match=True
rownumber=i
#create xpath codes and define the desired link's row, name, filenum and hyperlink
table=driver.find_element_by_xpath("//tbody")
xpathrow='''//tbody/tr['''+str(rownumber)+''']'''
objectrow=driver.find_element_by_xpath(xpathrow)
xpathname='''//tbody/tr['''+str(rownumber)+''']/td[2]'''
objectname=driver.find_element_by_xpath(xpathname)
xpathnumber='''//tbody/tr['''+str(rownumber)+''']/td[3]'''
objectnumber=driver.find_element_by_xpath(xpathnumber)
xpathhyperlink='''//tbody/tr['''+str(rownumber)+''']/td[6]/a'''
objecthyperlink=driver.find_element_by_xpath(xpathhyperlink)
#click on the link
objecthyperlink.click()
#switch to new window with the data
driver.switch_to_window(str(driver.window_handles[3]))
#wait to load
time.sleep(1)
#make sure this is a data table
if driver.title=='':
pass
#if driver.title!='':
# break
######
#import directly into python
url=str(driver.current_url)
downloaded_data=urllib2.urlopen(url)
csv_data=csv.reader(downloaded_data)
#create raw datatable
datatable=[]
for row in csv_data:
datatable.append(''.join(row))
#delete the 3 rows of headers
del datatable[0]
del datatable[0]
del datatable[0]
#split each row into individual values
for i in range(len(datatable)):
datatable[i]=datatable[i].split(' ')
j=0
while j<len(datatable[i]):
if datatable[i][j]=='':
del datatable[i][j]
else:
j=j+1
wvlen=[]
flux=[]
flux_error=[]
norm_error=[]
#append to wavelength, flux, flux_error, and norm_error lists
for i in range(len(datatable)):
wvlen.append(datatable[i][0])
flux.append(datatable[i][1])
flux_error.append(datatable[i][2])
norm_error.append(datatable[i][3])
#if an error occurred and wvlen, flux, errors, haven't been filled,
#continue to next source
if wvlen==[]:
continue
#change strings into numeric values
for i in range(len(wvlen)):
wvlen[i]=float(wvlen[i])
flux[i]=float(flux[i])
flux_error[i]=float(flux_error[i])
norm_error[i]=float(norm_error[i])
#change lists into arrays
wvlen=np.array(wvlen)
flux=np.array(flux)
flux_error=np.array(flux_error)
norm_error=np.array(norm_error)
#Create unsmoothed
pylab.plot(wvlen,flux)
pylab.xlabel('wavelength (microns)')
    pylab.ylabel('flux (Janskys)')
pylab.title(source[a]+' '+TDT[a]+' '+classification[a]+' '+'Wavelength vs. Flux 2.36-45um undegraded')
plt.savefig(\
classification[a][0]+'/'+classification[a][2:]+'/'+source[a]+' '+TDT[a]+'/'\
+source[a]+' '+TDT[a]+' 2-45 undegraded.pdf')
plt.close()
#Create 2-5 unsmoothed
shortlist=np.where(wvlen<5)[0]
pylab.plot(wvlen[shortlist],flux[shortlist])
pylab.xlabel('wavelength (microns)')
    pylab.ylabel('flux (Janskys)')
pylab.title(source[a]+' '+TDT[a]+' '+classification[a]+' '+'Wavelength vs. Flux 2.36-5um undegraded')
plt.savefig(\
classification[a][0]+'/'+classification[a][2:]+'/'+source[a]+' '+TDT[a]+'/'\
+source[a]+' '+TDT[a]+' 2-5 undegraded.pdf')
plt.close()
#Create 2-5 smoothed
#Degrade flux to resolution limit
res=150.
#Width of wavelength prior, OLD
old_width=np.zeros(len(wvlen[shortlist]))
for i in range(len(wvlen[shortlist])):
if i==0:
pass
elif i==(len(wvlen[shortlist])-1):
pass
if i!=0 and i!=(len(wvlen[shortlist])-1):
old_width[i]=((wvlen[shortlist][i]-wvlen[shortlist][i-1])/2)+\
((wvlen[shortlist][i+1]-wvlen[shortlist][i])/2.)
old_width[0]=old_width[1]
old_width[-1]=old_width[-2]
#Determine the width at each wavelength
width=np.array([])
for i in wvlen[shortlist]:
width=np.append(width,i/res)
#Sum fluxes
sum_flux=np.array([])
for i in wvlen[shortlist]:
item=np.where(wvlen[shortlist]==i)[0][0]
band_size=\
np.where(\
(wvlen[shortlist]>(i-(width[item])/2.))*(wvlen[shortlist]<(i+(width[item])/2.)))[0]
fluxes=flux[shortlist][band_size]
band_sum_fluxes=np.sum(fluxes)
sum_flux=np.append(sum_flux,band_sum_fluxes)
#find out how many datapoints are going into each wavelength width
bin_number=np.array([])
for i in range(len(shortlist)):
length=len(np.where((wvlen>(wvlen[shortlist][i]-width[i]/2))*(wvlen<(wvlen[shortlist][i]+width[i]/2)))[0])
bin_number=np.append(bin_number,length)
new_flux=sum_flux/bin_number
pylab.plot(wvlen[shortlist],new_flux)
pylab.xlabel('wavelength (microns)')
pylab.ylabel('Flux (Janskys)')
pylab.title(source[a]+' '+TDT[a]+' '+classification[a]+' '+'Wavelength vs. Flux 2.36-5um degraded')
plt.savefig(\
classification[a][0]+'/'+classification[a][2:]+'/'+source[a]+' '+TDT[a]+'/'\
+source[a]+' '+TDT[a]+' 2-5 degraded.pdf')
plt.close()
#create 2-5 both
pylab.plot(wvlen[shortlist],new_flux,label='degraded')
pylab.plot(wvlen[shortlist],flux[shortlist],label='undegraded')
pylab.xlabel('wavelength (microns)')
pylab.ylabel('Flux (Janskys)')
pylab.title(source[a]+' '+TDT[a]+' '+classification[a]+' '+'Wavelength vs. Flux 2.36-5um')
plt.legend(loc=4)
plt.savefig(\
classification[a][0]+'/'+classification[a][2:]+'/'+source[a]+' '+TDT[a]+'/'\
+source[a]+' '+TDT[a]+' 2-5.pdf')
plt.close()
#swtich to and close data table window
driver.switch_to_window(str(driver.window_handles[3]))
driver.close()
#switch to window #2 and close
driver.switch_to_window(str(driver.window_handles[2]))
driver.close()
#switch to window #1 and close
driver.switch_to_window(str(driver.window_handles[1]))
driver.close()
#switch back to SWS Atlas page
driver.switch_to_window(str(driver.window_handles[0]))
#print progress
print a
#append to negative flux list if >5% of fluxes are negative
neg_instances=np.array([])
for i in range(len(flux)):
if flux[i]<0:
neg_instances=np.append(neg_instances,i)
if len(neg_instances)>=616:
neg_flux_number.append(str(a))
neg_flux_source.append(source[a])
neg_flux_TDT.append(TDT[a])
#close original window and quit driver
driver.quit()
#call time upon finishing
end_time=time.time()
#total run time in seconds, minutes, and hours
run_time_s=end_time-start_time
run_time_m=run_time_s/60.
run_time_h=run_time_m/60.
#create composite time
run_time_h_comp=int(np.floor(run_time_h))
run_time_m_comp=int(np.floor(run_time_m-run_time_h_comp*60))
run_time_s_comp=int(np.floor(run_time_s-run_time_h_comp*60*60-run_time_m_comp*60))
run_time=str(run_time_h_comp)+':'+str(run_time_m_comp)+':'+str(run_time_s_comp)
print run_time
| mit |
rhuelga/sms-tools | lectures/08-Sound-transformations/plots-code/stftFiltering-orchestra.py | 2 | 1670 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band stop filter using a hanning window
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
filt = np.zeros(N//2+1)-60
filt[startBin:startBin+nBins] = bandpass
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, w, N, H)
mY,pY = STFT.stftAnal(y, w, N, H)
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
plt.plot(fs*np.arange(mX[0,:].size)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mY[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| agpl-3.0 |
12yujim/pymtl | pymtl/tools/simulation/SimulationMetrics.py | 8 | 8999 | #=========================================================================
# SimulationMetrics.py
#=========================================================================
from __future__ import print_function
import pickle
#-------------------------------------------------------------------------
# SimulationMetrics
#-------------------------------------------------------------------------
# Utility class for storing various SimulationTool metrics. Useful for
# gaining insight into simulator performace and determining the simulation
# efficiency of hardware model implementations.
class SimulationMetrics( object ):
#-----------------------------------------------------------------------
# __init__
#-----------------------------------------------------------------------
def __init__( self ):
self._ncycles = 0
self._pre_tick = True
self.num_modules = 0
self.num_tick_blocks = 0
self.num_posedge_clk_blocks = 0
self.num_combinational_blocks = 0
self.num_slice_blocks = 0
self.input_add_events_per_cycle = [ 0 ]
self.clock_add_events_per_cycle = [ 0 ]
self.input_add_callbk_per_cycle = [ 0 ]
self.clock_add_callbk_per_cycle = [ 0 ]
self.input_comb_evals_per_cycle = [ 0 ]
self.clock_comb_evals_per_cycle = [ 0 ]
self.slice_comb_evals_per_cycle = [ 0 ]
self.redun_comb_evals_per_cycle = [ 0 ]
self.is_slice = dict()
self.has_run = dict()
#-----------------------------------------------------------------------
# comb_evals_per_cycle
#-----------------------------------------------------------------------
@property
def comb_evals_per_cycle( self ):
return [ x+y for x,y in zip( self.input_comb_evals_per_cycle,
self.clock_comb_evals_per_cycle ) ]
#-----------------------------------------------------------------------
# add_events_per_cycle
#-----------------------------------------------------------------------
@property
def add_events_per_cycle( self ):
return [ x+y for x,y in zip( self.input_add_events_per_cycle,
self.clock_add_events_per_cycle ) ]
#-----------------------------------------------------------------------
# reg_model
#-----------------------------------------------------------------------
# Register a model in the design.
def reg_model( self, model ):
self.num_modules += 1
self.num_tick_blocks += len( model.get_tick_blocks() )
self.num_posedge_clk_blocks += len( model.get_posedge_clk_blocks() )
self.num_combinational_blocks += len( model.get_combinational_blocks() )
#-----------------------------------------------------------------------
# reg_eval
#-----------------------------------------------------------------------
# Register an eval block in the design.
def reg_eval( self, eval, is_slice = False ):
self.has_run [ eval ] = False
self.is_slice[ eval ] = is_slice
if is_slice:
self.num_slice_blocks += 1
#-----------------------------------------------------------------------
# incr_metrics_cycle
#-----------------------------------------------------------------------
# Should be called at the end of each simulation cycle. Initializes data
# structure storage to collect data for the next simulation cycle.
def incr_metrics_cycle( self ):
self._pre_tick = True
self._ncycles += 1
self.input_add_events_per_cycle += [ 0 ]
self.clock_add_events_per_cycle += [ 0 ]
self.input_add_callbk_per_cycle += [ 0 ]
self.clock_add_callbk_per_cycle += [ 0 ]
self.input_comb_evals_per_cycle += [ 0 ]
self.clock_comb_evals_per_cycle += [ 0 ]
self.slice_comb_evals_per_cycle += [ 0 ]
self.redun_comb_evals_per_cycle += [ 0 ]
for key in self.has_run:
self.has_run[ key ] = False
#-----------------------------------------------------------------------
# start_tick
#-----------------------------------------------------------------------
# Should be called before sequential logic blocks are executed. Allows
# collection of unique metrics for each phase of eval execution.
def start_tick( self ):
self._pre_tick = False
#-----------------------------------------------------------------------
# incr_add_events
#-----------------------------------------------------------------------
# Increment the number of times add_event() was called.
def incr_add_events( self ):
if self._pre_tick:
self.input_add_events_per_cycle[ self._ncycles ] += 1
else:
self.clock_add_events_per_cycle[ self._ncycles ] += 1
#-----------------------------------------------------------------------
  # incr_add_callbk
#-----------------------------------------------------------------------
# Increment the number of callbacks we attempted to place on the event
# queue.
def incr_add_callbk( self ):
if self._pre_tick:
self.input_add_callbk_per_cycle[ self._ncycles ] += 1
else:
self.clock_add_callbk_per_cycle[ self._ncycles ] += 1
#-----------------------------------------------------------------------
# incr_comb_evals
#-----------------------------------------------------------------------
# Increment the number of evals we actually executed.
def incr_comb_evals( self, eval ):
if self._pre_tick:
self.input_comb_evals_per_cycle[ self._ncycles ] += 1
else:
self.clock_comb_evals_per_cycle[ self._ncycles ] += 1
if self.has_run[ eval ]:
self.redun_comb_evals_per_cycle[ self._ncycles ] += 1
else:
self.has_run[ eval ] = True
if self.is_slice[ eval ]:
self.slice_comb_evals_per_cycle[ self._ncycles ] += 1
#-----------------------------------------------------------------------
# print_metrics
#-----------------------------------------------------------------------
# Print metrics to the commandline.
def print_metrics( self, detailed = True ):
print("-"*72)
print("Simulation Metrics")
print("-"*72)
print()
print("ncycles: {:4}".format(self._ncycles ))
print("modules: {:4}".format(self.num_modules ))
print("@tick blocks: {:4}".format(self.num_tick_blocks ))
print("@posedge_clk blocks: {:4}".format(self.num_posedge_clk_blocks ))
print("@combinational blocks: {:4}".format(self.num_combinational_blocks))
print("slice blocks: {:4}".format(self.num_slice_blocks ))
print("-"*72)
if not detailed:
return
print()
print(" pre-tick post-tick other ")
print("cycle adde clbk eval adde clbk eval slice redun")
print("-------- ---- ---- ---- ---- ---- ---- ----- -----")
for i in range( self._ncycles ):
print("{:8} {:4} {:4} {:4} {:4} {:4} {:4} {:5} {:5}".format(
i, self.input_add_events_per_cycle[ i ],
self.input_add_callbk_per_cycle[ i ],
self.input_comb_evals_per_cycle[ i ],
self.clock_add_events_per_cycle[ i ],
self.clock_add_callbk_per_cycle[ i ],
self.clock_comb_evals_per_cycle[ i ],
self.slice_comb_evals_per_cycle[ i ],
self.redun_comb_evals_per_cycle[ i ],
))
print("-"*72)
#-----------------------------------------------------------------------
# pickle_metrics
#-----------------------------------------------------------------------
# Pickle metrics to a file. Useful for loading in Python later for
  # creating matplotlib plots.
def pickle_metrics( self, filename ):
del self.is_slice
del self.has_run
pickle.dump( self, open( filename, 'wb' ) )
#-------------------------------------------------------------------------
# DummyMetrics
#-------------------------------------------------------------------------
# Dummy class which provides the interface of the SimulationMetrics
# metrics collection class, but doesn't actually collect any metrics.
# This is used as a drop-in replacement when collection is disabled so
# developers don't have to worry about adding a check to disable each
# call to SimulationMetrics methods.
class DummyMetrics( object ):
def reg_model( self, model ): pass
def reg_eval( self, eval, is_slice = False ): pass
def incr_metrics_cycle( self ): pass
def start_tick( self ): pass
def incr_add_events( self ): pass
def incr_add_callbk( self ): pass
def incr_comb_evals( self, eval ): pass
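# A minimal usage sketch (hypothetical, not part of the original file): the
# simulator picks a collector once, so call sites never need to branch on a
# collection flag. `collect_metrics` is an assumed configuration flag here.
#
#   metrics = SimulationMetrics() if collect_metrics else DummyMetrics()
#   metrics.reg_model( model )
#   metrics.incr_add_events()
#   metrics.incr_metrics_cycle()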
| bsd-3-clause |
numenta-archive/htmresearch | projects/dp1/dp_experiment1.py | 3 | 12622 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is for running some very preliminary disjoint pooling experiments.
"""
import cPickle
from multiprocessing import Pool
import random
import time
import numpy
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def printColumnPoolerDiagnostics(pooler):
print "sampleSizeProximal: ", pooler.sampleSizeProximal
print "Average number of proximal synapses per cell:",
print float(pooler.numberOfProximalSynapses()) / pooler.cellCount
print "Average number of distal segments per cell:",
print float(pooler.numberOfDistalSegments()) / pooler.cellCount
print "Average number of connected distal synapses per cell:",
print float(pooler.numberOfConnectedDistalSynapses()) / pooler.cellCount
print "Average number of distal synapses per cell:",
print float(pooler.numberOfDistalSynapses()) / pooler.cellCount
def runExperiment(args):
"""
Run experiment. args is a dict representing the parameters. We do it this way
to support multiprocessing.
The method returns the args dict updated with multiple additional keys
representing accuracy metrics.
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
sensorInputSize = args.get("sensorInputSize", 300)
networkType = args.get("networkType", "MultipleL4L2Columns")
longDistanceConnections = args.get("longDistanceConnections", 0)
locationNoise = args.get("locationNoise", 0.0)
featureNoise = args.get("featureNoise", 0.0)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
enableFeedback = args.get("enableFeedback", True)
numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
numInferenceRpts = args.get("numInferenceRpts", 1)
numLearningRpts = args.get("numLearningRpts", 3)
l2Params = args.get("l2Params", None)
l4Params = args.get("l4Params", None)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=sensorInputSize,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
r = objects.objectConfusion()
print "Average common pairs in objects=", r[0],
print ", locations=",r[1],", features=",r[2]
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# This object machine will simulate objects where each object is just one
# unique feature/location pair. We will use this to pretrain L4/L2 with
# individual pairs.
pairObjects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=sensorInputSize,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
# Create "pair objects" consisting of all unique F/L pairs from our objects.
# These pairs should have the same SDRs as the original objects.
pairObjects.locations = objects.locations
pairObjects.features = objects.features
distinctPairs = objects.getDistinctPairs()
print "Number of distinct feature/location pairs:",len(distinctPairs)
for pairNumber,pair in enumerate(distinctPairs):
pairObjects.addObject([pair], pairNumber)
#####################################################
#
# Setup experiment and train the network
name = "dp_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
L2Overrides=l2Params,
L4Overrides=l4Params,
networkType = networkType,
longDistanceConnections=longDistanceConnections,
inputSize=sensorInputSize,
externalInputSize=2400,
numInputBits=20,
seed=trialNum,
enableFeedback=enableFeedback,
numLearningPoints=numLearningRpts,
)
# Learn all FL pairs in each L4 and in each L2
  # Learning in L2 involves choosing a small random number of cells, growing
  # proximal synapses to L4 cells, and growing distal synapses to active cells
  # in each neighboring column. Each column gets its own distal segment.
exp.learnObjects(pairObjects.provideObjectsToLearn())
# Verify that all columns learned the pairs
# numCorrectClassifications = 0
# for pairId in pairObjects:
#
# obj = pairObjects[pairId]
# objectSensations = {}
# for c in range(numColumns):
# objectSensations[c] = [obj[0]]*settlingTime
#
# inferConfig = {
# "object": pairId,
# "numSteps": settlingTime,
# "pairs": objectSensations,
# }
#
# inferenceSDRs = pairObjects.provideObjectToInfer(inferConfig)
#
# exp.infer(inferenceSDRs, objectName=pairId, reset=False)
#
# if exp.isObjectClassified(pairId, minOverlap=30):
# numCorrectClassifications += 1
#
# exp.sendReset()
#
# print "Classification accuracy for pairs=",100.0*numCorrectClassifications/len(distinctPairs)
########################################################################
#
# Create "object representations" in L2 by simultaneously invoking the union
# of all FL pairs in an object and doing some sort of spatial pooling to
# create L2 representation.
exp.resetStatistics()
for objectId in objects:
# Create one sensation per object consisting of the union of all features
# and the union of locations.
ul, uf = objects.getUniqueFeaturesLocationsInObject(objectId)
print "Object",objectId,"Num unique features:",len(uf),"Num unique locations:",len(ul)
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = [(tuple(ul), tuple(uf))]*settlingTime
inferConfig = {
"object": objectId,
"numSteps": settlingTime,
"pairs": objectSensations,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName="Object "+str(objectId))
# Compute confusion matrix between all objects as network settles
for iteration in range(settlingTime):
confusion = numpy.zeros((numObjects, numObjects))
for o1 in objects:
for o2 in objects:
confusion[o1, o2] = len(set(exp.statistics[o1]["Full L2 SDR C0"][iteration]) &
set(exp.statistics[o2]["Full L2 SDR C0"][iteration]) )
plt.figure()
plt.imshow(confusion)
plt.xlabel('Object #')
plt.ylabel('Object #')
plt.title("Object overlaps")
plt.colorbar()
plt.savefig("confusion_random_10L_5F_"+str(iteration)+".pdf")
plt.close()
for col in range(numColumns):
print "Diagnostics for column",col
printColumnPoolerDiagnostics(exp.getAlgorithmInstance(column=col))
print
return args
# Show average overlap as a function of number of shared FL pairs,
# shared locations, shared features
# Compute confusion matrix showing number of shared FL pairs
# Compute confusion matrix using our normal method
def runExperimentPool(numObjects,
numLocations,
numFeatures,
numColumns,
longDistanceConnectionsRange = [0.0],
numWorkers=7,
nTrials=1,
numPoints=10,
locationNoiseRange=[0.0],
featureNoiseRange=[0.0],
enableFeedback=[True],
ambiguousLocationsRange=[0],
numInferenceRpts=1,
settlingTime=3,
l2Params=None,
l4Params=None,
resultsName="convergence_results.pkl"):
"""
Allows you to run a number of experiments using multiple processes.
For each parameter except numWorkers, pass in a list containing valid values
for that parameter. The cross product of everything is run, and each
combination is run nTrials times.
Returns a list of dict containing detailed results from each experiment.
Also pickles and saves the results in resultsName for later analysis.
Example:
results = runExperimentPool(
numObjects=[10],
numLocations=[5],
numFeatures=[5],
numColumns=[2,3,4,5,6],
numWorkers=8,
nTrials=5)
"""
# Create function arguments for every possibility
args = []
for c in reversed(numColumns):
for o in reversed(numObjects):
for l in numLocations:
for f in numFeatures:
for p in longDistanceConnectionsRange:
for t in range(nTrials):
for locationNoise in locationNoiseRange:
for featureNoise in featureNoiseRange:
for ambiguousLocations in ambiguousLocationsRange:
for feedback in enableFeedback:
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"numPoints": numPoints,
"longDistanceConnections" : p,
"plotInferenceStats": False,
"locationNoise": locationNoise,
"featureNoise": featureNoise,
"enableFeedback": feedback,
"numAmbiguousLocations": ambiguousLocations,
"numInferenceRpts": numInferenceRpts,
"l2Params": l2Params,
"l4Params": l4Params,
"settlingTime": settlingTime,
}
)
numExperiments = len(args)
print "{} experiments to run, {} workers".format(numExperiments, numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
rs = pool.map_async(runExperiment, args, chunksize=1)
while not rs.ready():
remaining = rs._number_left
pctDone = 100.0 - (100.0*remaining) / numExperiments
print " =>", remaining, "experiments remaining, percent complete=",pctDone
time.sleep(5)
pool.close() # No more work
pool.join()
result = rs.get()
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# print "Full results:"
# pprint.pprint(result, width=150)
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result
if __name__ == "__main__":
# This is how you run a specific experiment in single process mode. Useful
# for debugging, profiling, etc.
results = runExperiment(
{
"numObjects": 20,
"numPoints": 10,
"numLocations": 10,
"numFeatures": 5,
"numColumns": 1,
"trialNum": 4,
"settlingTime": 3,
"plotInferenceStats": False, # Outputs detailed graphs
}
)
| agpl-3.0 |
vigilv/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 53 | 21055 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
    # P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
m = "'init' must be 'pca', 'random' or a NumPy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
                              grad_bh, 0.5, 2, 1, skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
| bsd-3-clause |
Twangist/log_calls | tests/test_with_sklearn/test_decorate_sklearn_KMeans_functions.py | 1 | 6482 | __author__ = 'brianoneill'
###############################################################################
def test_deco_sklearn_cluster_kmeans_function():
"""
    Dunno how to decorate `sklearn.cluster.k_means` so that the decorated function
    is called via `sklearn.cluster.k_means(...)`. What gets decorated is the function
    qualified by the *module* name,
        `sklearn.cluster.k_means_.k_means`
    because sklearn.cluster.k_means_ is the module of the function `sklearn.cluster.k_means`
as per inspect.getmodule
>>> from sklearn.datasets import make_blobs
>>> n_samples = 1500
>>> random_state = 170
>>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)
>>> from log_calls import log_calls
THIS Doesn't work:
# import sklearn
# import sklearn.cluster
# log_calls.decorate_module_function(sklearn.cluster.k_means)
# ret = sklearn.cluster.k_means(X, n_clusters=45)
### THIS WORKS (import module and call it through module name :| ):
>>> ## TODO Can this be improved?? It's clunky to require that the module name be known.
>>> import sklearn.cluster.k_means_
>>> log_calls.decorate_module_function(sklearn.cluster.k_means_.k_means,
... args_sep='\\n',
... override=True)
>>> ret = sklearn.cluster.k_means_.k_means(X, n_clusters=3, random_state=2) # doctest: +NORMALIZE_WHITESPACE
k_means <== called by <module>
arguments:
X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]])
n_clusters=3
random_state=2
defaults:
init='k-means++'
precompute_distances='auto'
n_init=10
max_iter=300
verbose=False
tol=0.0001
copy_x=True
n_jobs=1
return_n_iter=False
k_means ==> returning to <module>
>>> ret
(array([[ 1.91176144, 0.40634045],
[-8.94137566, -5.48137132],
[-4.55490993, 0.02920864]]), array([2, 2, 1, ..., 0, 0, 0], dtype=int32), 2862.7319140789582)
"""
pass
def test__decorate_functions():
"""
>>> from sklearn.datasets import make_blobs
>>> n_samples = 1500
>>> random_state = 170
>>> X, y = make_blobs(n_samples=n_samples, random_state=random_state)
>>> from log_calls import log_calls
(B)
import the module, deco the fn as a module function, and call the fn via the module
==> OUTPUT:
>>> import sklearn.cluster.k_means_
>>> log_calls.decorate_module_function(sklearn.cluster.k_means_.k_means,
... override=True)
>>> ret_B = sklearn.cluster.k_means_.k_means(X, n_clusters=3, random_state=2) # doctest: +NORMALIZE_WHITESPACE
k_means <== called by <module>
arguments: X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]]), n_clusters=3, random_state=2
defaults: init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=0.0001, copy_x=True, n_jobs=1, return_n_iter=False
k_means ==> returning to <module>
>>> ret_B
(array([[ 1.91176144, 0.40634045],
[-8.94137566, -5.48137132],
[-4.55490993, 0.02920864]]), array([2, 2, 1, ..., 0, 0, 0], dtype=int32), 2862.7319140789582)
(A)
import the package, deco the fn as a package function;
>>> import sklearn.cluster
>>> log_calls.decorate_package_function(sklearn.cluster.k_means,
... override=True)
Call the fn via the package ==> OUTPUT, and
Call the fn via the module ==> OUTPUT.
Call via package -- OUTPUT:
>>> ret = sklearn.cluster.k_means(X, n_clusters=3, random_state=2) # doctest: +NORMALIZE_WHITESPACE
k_means <== called by <module>
arguments: X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]]), n_clusters=3, random_state=2
defaults: init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=0.0001, copy_x=True, n_jobs=1, return_n_iter=False
k_means ==> returning to <module>
>>> ret
(array([[ 1.91176144, 0.40634045],
[-8.94137566, -5.48137132],
[-4.55490993, 0.02920864]]), array([2, 2, 1, ..., 0, 0, 0], dtype=int32), 2862.7319140789582)
Call via module -- OUTPUT TOO now :D:D:D :
>>> ret = sklearn.cluster.k_means_.k_means(X, n_clusters=3, random_state=2) # doctest: +NORMALIZE_WHITESPACE
k_means <== called by <module>
arguments: X=array([[ -5.19811282e+00, 6.41869316e-01],
[ -5.75229538e+00, 4.18627111e-01],
[ -1.08448984e+01, -7.55352273e+00],
...,
[ 1.36105255e+00, -9.07491863e-01],
[ -3.54141108e-01, 7.12241630e-01],
[ 1.88577252e+00, 1.41185693e-03]]), n_clusters=3, random_state=2
defaults: init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=0.0001, copy_x=True, n_jobs=1, return_n_iter=False
k_means ==> returning to <module>
>>> ret
(array([[ 1.91176144, 0.40634045],
[-8.94137566, -5.48137132],
[-4.55490993, 0.02920864]]), array([2, 2, 1, ..., 0, 0, 0], dtype=int32), 2862.7319140789582)
"""
pass
##############################################################################
# end of tests.
##############################################################################
import doctest
# For unittest integration
def load_tests(loader, tests, ignore):
try:
import sklearn
except ImportError:
pass
else:
tests.addTests(doctest.DocTestSuite())
return tests
if __name__ == '__main__':
doctest.testmod()
| mit |
fspaolo/scikit-learn | examples/linear_model/plot_ols.py | 8 | 1966 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the mean squared error and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import pylab as pl
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis]
diabetes_X_temp = diabetes_X[:, :, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X_temp[:-20]
diabetes_X_test = diabetes_X_temp[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
pl.scatter(diabetes_X_test, diabetes_y_test, color='black')
pl.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
jmontoyam/mne-python | mne/preprocessing/tests/test_infomax.py | 6 | 5969 | # Authors: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
"""
Test the infomax algorithm.
Parts of this code are taken from scikit-learn
"""
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import stats
from scipy import linalg
from mne.preprocessing.infomax_ import infomax
from mne.utils import requires_sklearn, run_tests_if_main, check_version
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
@requires_sklearn
def test_infomax_blowup():
""" Test the infomax algorithm blowup condition
"""
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 100
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
center_and_norm(m)
X = _get_pca().fit_transform(m.T)
k_ = infomax(X, extended=True, l_rate=0.1)
s_ = np.dot(k_, X.T)
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
@requires_sklearn
def test_infomax_simple():
""" Test the infomax algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 500
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
for add_noise in (False, True):
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, n_samples)
center_and_norm(m)
algos = [True, False]
for algo in algos:
X = _get_pca().fit_transform(m.T)
k_ = infomax(X, extended=algo)
s_ = np.dot(k_, X.T)
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
def test_infomax_weights_ini():
""" Test the infomax algorithm when user provides an initial weights matrix.
"""
X = np.random.random((3, 100))
weights = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
w1 = infomax(X, max_iter=0, weights=weights, extended=True)
w2 = infomax(X, max_iter=0, weights=weights, extended=False)
assert_almost_equal(w1, weights)
assert_almost_equal(w2, weights)
@requires_sklearn
def test_non_square_infomax():
""" Test non-square infomax
"""
rng = np.random.RandomState(0)
n_samples = 200
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
n_observed = 6
mixing = rng.randn(n_observed, 2)
for add_noise in (False, True):
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(n_observed, n_samples)
center_and_norm(m)
m = m.T
m = _get_pca(rng).fit_transform(m)
# we need extended since input signals are sub-gaussian
unmixing_ = infomax(m, random_state=rng, extended=True)
s_ = np.dot(unmixing_, m.T)
# Check that the mixing model described in the docstring holds:
mixing_ = linalg.pinv(unmixing_.T)
assert_almost_equal(m, s_.T.dot(mixing_))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
def _get_pca(rng=None):
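    # sklearn 0.18 folded RandomizedPCA into PCA(svd_solver='randomized'), so
    # pick whichever API the installed version provides.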
if not check_version('sklearn', '0.18'):
from sklearn.decomposition import RandomizedPCA
return RandomizedPCA(n_components=2, whiten=True,
random_state=rng)
else:
from sklearn.decomposition import PCA
return PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=rng)
run_tests_if_main()
| bsd-3-clause |
MTgeophysics/mtpy | tests/SmartMT/test_exportDialog.py | 1 | 12640 | import os
from unittest import TestCase
import matplotlib.pyplot as plt
import numpy as np
from qtpy import QtCore
from qtpy.QtWidgets import QFileDialog, QMessageBox, QDialog
from qtpy.QtTest import QTest
from mtpy.gui.SmartMT.gui.export_dialog import ExportDialog, IMAGE_FORMATS
from tests import make_temp_dir
from tests.SmartMT import _click_area
from tests.imaging import plt_wait
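# The two helpers below stand in for QFileDialog.exec_() so the tests can
# accept or reject a dialog without blocking on real user interaction.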
def _fake_exec_accept():
return QFileDialog.Accepted
def _fake_exec_reject():
return QFileDialog.Rejected
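# _rewrite_text replaces a widget's current text: select all (Ctrl+A), type the
# new value, then press Enter to commit it.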
def _rewrite_text(widget, text, modifier=QtCore.Qt.NoModifier):
QTest.keyEvent(QTest.Click, widget, QtCore.Qt.Key_A, QtCore.Qt.ControlModifier)
QTest.keyClicks(widget, text, modifier=modifier)
QTest.keyEvent(QTest.Click, widget, QtCore.Qt.Key_Enter)
def _create_fig():
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('About as simple as it gets, folks')
plt.grid(True)
# plt.savefig("test.png")
# plt.show()
plt_wait(1)
return plt.gcf() # get access to the current fig
class TestExportDialog(TestCase):
@classmethod
def setUpClass(cls):
# setup temp dir
cls._temp_dir = make_temp_dir(cls.__name__)
def setUp(self):
# create figure
self._fig = _create_fig()
# create GUI
self.dialog = ExportDialog()
self.dialog.show()
QTest.qWaitForWindowActive(self.dialog)
def tearDown(self):
self.dialog.close()
plt.close(self._fig)
def test_defaults(self):
""" test gui default state"""
# check row states
self.assertTrue(self.dialog.ui.comboBox_fileName.currentText() == "figure.png", "Default File Name")
self.assertTrue(self.dialog.ui.comboBox_directory.currentText() == os.path.expanduser("~"), "Default Path")
# file type
self.assertTrue(set(["{} (.{})".format(desc, ext)
for ext, desc in self._fig.canvas.get_supported_filetypes().items()]) ==
set([str(self.dialog.ui.comboBox_fileType.itemText(i))
for i in range(self.dialog.ui.comboBox_fileType.count())]),
"Supported Formats")
self.assertTrue(self.dialog.ui.checkBox_tightBbox.isChecked(), "Tight Layout Default")
self.assertFalse(self.dialog.get_transparent(), "Transparent Default")
self.assertTrue(self.dialog.ui.comboBox_orientation.currentText() == "Landscape", "Orientation Default")
self.assertTrue(self.dialog.ui.spinBox_dpi.value() == 80)
self.assertTrue(self.dialog.ui.doubleSpinBox_height_inches.value() == 6.)
self.assertTrue(self.dialog.ui.doubleSpinBox_width_inches.value() == 8.)
self.assertTrue(self.dialog.ui.spinBox_height_pixels.value() == 480)
self.assertTrue(self.dialog.ui.spinBox_width_pixels.value() == 640)
self.assertTrue(self.dialog.ui.checkBox_open_after_export.isChecked())
# check states from the getters
self.assertTrue(self.dialog.get_bbox_inches() == 'tight', "Tight Layout Value")
self.assertTrue(self.dialog.get_file_format()[0] == 'png', "Format Value")
self.assertTrue(self.dialog.get_orientation() == 'landscape', "Orientation Value")
self.assertTrue(os.path.normpath(self.dialog.get_save_file_name()) ==
os.path.normpath(os.path.join(os.path.expanduser("~"),
str(self.dialog.ui.comboBox_fileName.currentText()))
),
"Save File Path Value")
def test_file_name_change(self):
# select all existing tests
_rewrite_text(self.dialog.ui.comboBox_fileName, "test_file.jpg")
# current text should have changed
self.assertTrue(self.dialog.ui.comboBox_fileName.currentText() == "test_file.jpg",
"Changed file name")
# format should have changed
self.assertTrue(self.dialog.get_file_format()[0] == "jpg")
# transparent should be false
self.assertFalse(self.dialog.get_transparent(), "transparent")
# change to file with unsupported format
_rewrite_text(self.dialog.ui.comboBox_fileName, "test_file_2.abcd")
# current text should have changed
self.assertTrue(self.dialog.ui.comboBox_fileName.currentText() == "test_file_2.abcd",
"Changed file name")
# current format should not been changed
self.assertTrue(self.dialog.get_file_format()[0] == "jpg")
def test_file_type_change(self):
for i in range(self.dialog.ui.comboBox_fileType.count()):
self.dialog.ui.comboBox_fileType.setCurrentIndex(i)
extenion = self.dialog.get_file_format()[0]
self.assertTrue(self.dialog.ui.comboBox_fileName.currentText() == "figure.{}".format(extenion))
def test_directory_change(self):
_rewrite_text(self.dialog.ui.comboBox_directory, os.path.abspath(self._temp_dir))
self.assertTrue(os.path.dirname(self.dialog.get_save_file_name()) == os.path.abspath(self._temp_dir))
# print self.dialog.get_save_file_name()
# select from the browse
self.dialog._dir_dialog.setDirectory(os.path.normpath(os.path.expanduser("~")))
self.dialog._dir_dialog.exec_ = _fake_exec_reject # path should not change
_click_area(self.dialog.ui.pushButton_browse)
self.assertTrue(os.path.dirname(self.dialog.get_save_file_name()) == os.path.abspath(self._temp_dir))
self.dialog._dir_dialog.exec_ = _fake_exec_accept
_click_area(self.dialog.ui.pushButton_browse)
# QTest.qWaitForWindowShown(self.dialog._dir_dialog)
# self.dialog._dir_dialog.accept()
self.assertTrue(
os.path.dirname(
os.path.normpath(self.dialog.get_save_file_name())
) == os.path.normpath(os.path.expanduser("~")))
def test_export(self):
# set export dir
_rewrite_text(self.dialog.ui.comboBox_directory,
os.path.abspath(self._temp_dir))
fname = self.dialog.get_save_file_name()
if os.path.isfile(fname):
# if file exist, remove
os.remove(fname)
self.assertFalse(os.path.exists(fname), "File exists")
# set open after to false
self.dialog.ui.checkBox_open_after_export.setChecked(False)
self.dialog.exec_ = self._fake_export_dialog_exec_cancel # should not create file
self.dialog._msg_box.exec_ = self._fake_msg_dialog_exec_cancel
fname = self.dialog.export_to_file(self._fig)
print(self._fig.get_dpi(), self.dialog.ui.spinBox_dpi.value())
self.assertTrue(self.dialog.ui.spinBox_dpi.value() == self._fig.get_dpi())
self.assertTrue(fname is None)
self.assertFalse(os.path.exists(self.dialog.get_save_file_name()), "File exists")
# save the new file now
self.dialog.exec_ = self._fake_export_dialog_exec_export
fname = self.dialog.export_to_file(self._fig)
self.assertTrue(os.path.exists(fname), "File exists")
self.assertTrue(os.path.isfile(fname))
file_count = len([name for name in os.listdir(self._temp_dir)
if os.path.isfile(os.path.join(self._temp_dir, name))])
# save to the same file and overwrite
self.dialog._msg_box.exec_ = self._fake_msg_dialog_exec_overwrite
fname = self.dialog.export_to_file(self._fig)
self.assertTrue(os.path.exists(fname), "File exists")
new_file_count = len([name for name in os.listdir(self._temp_dir)
if os.path.isfile(os.path.join(self._temp_dir, name))])
self.assertTrue(file_count == new_file_count) # no file should be created
# save to the same file and save as new name
self.dialog._msg_box.exec_ = self._fake_msg_dialog_exec_save_as
fname = self.dialog.export_to_file(self._fig)
self.assertTrue(os.path.exists(fname), "File exists")
new_file_count = len([name for name in os.listdir(self._temp_dir)
if os.path.isfile(os.path.join(self._temp_dir, name))])
self.assertTrue(file_count + 1 == new_file_count) # one extra file should be created
file_count = new_file_count
def test_dpi(self):
# save to higher dpi
# set export dir
_rewrite_text(self.dialog.ui.comboBox_directory,
os.path.abspath(self._temp_dir))
self.dialog.exec_ = self._fake_export_dialog_exec_export
self.dialog._msg_box.exec_ = self._fake_msg_dialog_exec_overwrite
# set open after to false
self.dialog.ui.checkBox_open_after_export.setChecked(False)
QTest.keyClicks(self.dialog.ui.spinBox_dpi, '400')
_rewrite_text(self.dialog.ui.comboBox_fileName, "400dpi.jpg")
fname = self.dialog.export_to_file(self._fig)
self.assertTrue(os.path.exists(fname), "File exists")
new_file_count = len([name for name in os.listdir(self._temp_dir)
if os.path.isfile(os.path.join(self._temp_dir, name))])
QTest.keyClicks(self.dialog.ui.spinBox_dpi, '600')
_rewrite_text(self.dialog.ui.comboBox_fileName, "600dpi.jpg")
fname = self.dialog.export_to_file(self._fig)
self.assertTrue(os.path.exists(fname), "File exists")
new_file_count = len([name for name in os.listdir(self._temp_dir)
if os.path.isfile(os.path.join(self._temp_dir, name))])
QTest.keyClicks(self.dialog.ui.spinBox_dpi, '1000')
_rewrite_text(self.dialog.ui.comboBox_fileName, "1000dpi.jpg")
fname = self.dialog.export_to_file(self._fig)
self.assertTrue(os.path.exists(fname), "File exists")
new_file_count = len([name for name in os.listdir(self._temp_dir)
if os.path.isfile(os.path.join(self._temp_dir, name))])
def _fake_msg_dialog_exec_overwrite(self):
self.dialog._msg_box.show()
QTest.qWaitForWindowActive(self.dialog._msg_box)
_click_area(self.dialog._msg_box_button_overwrite)
return QMessageBox.Accepted
def _fake_msg_dialog_exec_save_as(self):
self.dialog._msg_box.show()
QTest.qWaitForWindowActive(self.dialog._msg_box)
_click_area(self.dialog._msg_box_button_save_as)
return QMessageBox.Accepted
def _fake_msg_dialog_exec_cancel(self):
self.dialog._msg_box.show()
QTest.qWaitForWindowActive(self.dialog._msg_box)
_click_area(self.dialog._msg_box_button_cancel)
return QMessageBox.Cancel
def _fake_export_dialog_exec_cancel(self):
_click_area(self.dialog.ui.pushButton_cancel)
return QDialog.Rejected
def _fake_export_dialog_exec_export(self):
_click_area(self.dialog.ui.pushButton_export)
return QDialog.Accepted
def _transparent_test_gen(index, ext, description):
def _test_transparent(self):
# set to save to tmp dir
_rewrite_text(self.dialog.ui.comboBox_directory,
os.path.abspath(self._temp_dir))
self.dialog.exec_ = self._fake_export_dialog_exec_export
self.dialog._msg_box.exec_ = self._fake_msg_dialog_exec_overwrite
# set open after to false
self.dialog.ui.checkBox_open_after_export.setChecked(False)
# print "testing save to {0[1]} (.{0[0]})".format(self.dialog.get_file_format())
for isTrans in [True, False]:
_rewrite_text(self.dialog.ui.comboBox_fileName, "transparent_{}.{}".format(isTrans, ext))
self.dialog.ui.comboBox_fileType.setCurrentIndex(index)
self.assertTrue((ext, description) == self.dialog.get_file_format(), "sanity check")
self.dialog.ui.checkBox_transparent.setChecked(isTrans)
try:
fname = self.dialog.export_to_file(self._fig)
except RuntimeError as e:
                self.skipTest(str(e))
self.assertTrue(os.path.exists(fname),
"testing save to {0[1]} (.{0[0]}) without transparent".format(
self.dialog.get_file_format()))
return _test_transparent
# generate tests
for index, (ext, description) in enumerate(IMAGE_FORMATS):
_test = _transparent_test_gen(index, ext, description)
_test.__name__ = "test_transparent_{}".format(ext)
setattr(TestExportDialog, _test.__name__, _test)
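# The loop above attaches one generated method per entry in IMAGE_FORMATS to
# TestExportDialog (e.g. test_transparent_png, test_transparent_pdf, ...), so
# every supported format gets its own transparent/opaque export check.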
| gpl-3.0 |
cmcantalupo/geopm | integration/experiment/power_sweep/gen_power_sweep_summary.py | 1 | 3310 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
Prints a summary of the data from a power sweep experiment.
'''
import sys
import pandas
import argparse
import geopmpy.io
from experiment import common_args
def summary(parse_output):
# rename some columns
parse_output['power_limit'] = parse_output['POWER_PACKAGE_LIMIT_TOTAL']
parse_output['runtime'] = parse_output['runtime (s)']
parse_output['network_time'] = parse_output['time-hint-network (s)']
parse_output['energy_pkg'] = parse_output['package-energy (J)']
parse_output['energy_dram'] = parse_output['dram-energy (J)']
parse_output['frequency'] = parse_output['frequency (Hz)']
parse_output['achieved_power'] = parse_output['energy_pkg'] / parse_output['sync-runtime (s)']
parse_output['iteration'] = parse_output.apply(lambda row: row['Profile'].split('_')[-1],
axis=1)
# add extra columns
parse_output['cpu_time'] = parse_output['runtime'] - parse_output['network_time']
# set up index for grouping
parse_output = parse_output.set_index(['Agent', 'host', 'power_limit'])
summary = pandas.DataFrame()
for col in ['count', 'runtime', 'cpu_time', 'network_time', 'energy_pkg', 'energy_dram', 'frequency', 'achieved_power']:
summary[col] = parse_output[col].groupby(['Agent', 'power_limit']).mean()
return summary
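# Example sketch (hypothetical agent name): the returned frame is indexed by
# (Agent, power_limit), so one agent's sweep can be read with
#   summary(output.get_epoch_df()).loc['power_governor']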
if __name__ == '__main__':
parser = argparse.ArgumentParser()
common_args.add_output_dir(parser)
args = parser.parse_args()
output_dir = args.output_dir
output = geopmpy.io.RawReportCollection("*report", dir_name=output_dir)
result = summary(output.get_epoch_df())
sys.stdout.write('{}\n'.format(result))
| bsd-3-clause |
theroncarmichael/GC-CaT-Metallicitiy | interp.py | 1 | 9342 | #! /usr/bin/env python
'''
Created on Mar 17, 2011
@author: Chris Usher
'''
import numpy as np
#import matplotlib.pyplot as plt
import scipy.interpolate as interpolate
def redisperse(inputwavelengths, inputfluxes, firstWavelength=None, lastWavelength=None, dispersion=None, nPixels=None, outside=None, function='spline'):
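    """Resample a spectrum onto a new linear wavelength grid.
    The output grid is built from whichever of firstWavelength, lastWavelength,
    dispersion and nPixels are supplied; flux is then redistributed between the
    old and new pixel edges by interp(). Returns (outputwavelengths, outputfluxes).
    """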
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
epsilon = 1e-10
if dispersion == None and nPixels != None:
if firstWavelength == None:
firstWavelength = inputwavelengths[0]
if lastWavelength == None:
lastWavelength = inputwavelengths[-1]
outputwavelengths = np.linspace(firstWavelength, lastWavelength, nPixels)
elif dispersion != None and nPixels == None:
if firstWavelength == None:
firstWavelength = inputwavelengths[0]
if lastWavelength == None:
lastWavelength = inputwavelengths[-1]
outputwavelengths = np.arange(firstWavelength, lastWavelength + epsilon, dispersion)
elif dispersion != None and nPixels != None:
if firstWavelength != None:
            outputwavelengths = firstWavelength + dispersion * np.arange(nPixels)
        elif lastWavelength != None:
            outputwavelengths = lastWavelength - dispersion * np.arange(nPixels)
            outputwavelengths = outputwavelengths[::-1]
        else:
            outputwavelengths = inputwavelengths[0] + dispersion * np.arange(nPixels)
else:
dispersion = (inputwavelengths[-1] - inputwavelengths[0]) / (inputwavelengths.size - 1)
if lastWavelength == None:
lastWavelength = inputwavelengths[-1]
if firstWavelength != None:
outputwavelengths = np.arange(firstWavelength, lastWavelength + epsilon, dispersion)
else:
outputwavelengths = np.arange(inputwavelengths[0], lastWavelength + epsilon, dispersion)
outputdispersion = outputwavelengths[1] - outputwavelengths[0]
outputedges = np.linspace(outputwavelengths[0] - outputdispersion / 2, outputwavelengths[-1] + outputdispersion / 2, outputwavelengths.size + 1)
outputfluxes = interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function)
return (outputwavelengths, outputfluxes)
def rebin(inputwavelengths, inputfluxes, outputwavelengths, outside=None, function='spline', ratio=False):
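    """Rebin a spectrum onto the pixel grid defined by outputwavelengths,
    conserving flux between the old and new pixel edges via interp()."""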
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
outputedges = np.empty(outputwavelengths.size + 1)
outputedges[1:-1] = (outputwavelengths[1:] + outputwavelengths[:-1]) / 2
outputedges[0] = 3 * outputwavelengths[0] / 2 - outputwavelengths[1] / 2
outputedges[-1] = 3 * outputwavelengths[-1] / 2 - outputwavelengths[-2] / 2
return interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
def interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside=None, function='spline', ratio=False):
if not ratio:
fluxdensities = inputfluxes / inputdispersions.mean()
else:
fluxdensities = inputfluxes
outputfluxes = np.ones(outputwavelengths.size)
if outside != None:
outputfluxes = outputfluxes * outside
else:
middle = (outputwavelengths[0] + outputwavelengths[-1]) / 2
firstnew = None
lastnew = None
if function == 'nearest':
pixels = np.arange(0, inputfluxes.size)
for newpixel in range(outputfluxes.size):
if inputedges[0] <= outputwavelengths[newpixel] <= inputedges[-1]:
outputlowerlimit = outputedges[newpixel]
outputupperlimit = outputedges[newpixel + 1]
outputfluxes[newpixel] = 0
below = inputedges[1:] < outputlowerlimit
above = inputedges[:-1] > outputupperlimit
ok = ~(below | above)
for oldpixel in pixels[ok]:
inputlowerlimit = inputedges[oldpixel]
inputupperlimit = inputedges[oldpixel + 1]
if inputlowerlimit >= outputlowerlimit and inputupperlimit <= outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * inputdispersions[oldpixel]
elif inputlowerlimit < outputlowerlimit and inputupperlimit > outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * (outputupperlimit - outputlowerlimit)
elif inputlowerlimit < outputlowerlimit and outputlowerlimit <= inputupperlimit <= outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * (inputupperlimit - outputlowerlimit)
elif outputupperlimit >= inputlowerlimit >= outputlowerlimit and inputupperlimit > outputupperlimit:
outputfluxes[newpixel] += fluxdensities[oldpixel] * (outputupperlimit - inputlowerlimit)
if firstnew == None:
firstnew = outputfluxes[newpixel]
if ratio:
outputfluxes[newpixel] = outputfluxes[newpixel] / (outputupperlimit - outputlowerlimit)
elif outputwavelengths[newpixel] > inputwavelengths[-1] and lastnew == None:
lastnew = outputfluxes[newpixel - 1]
else:
fluxspline = interpolate.UnivariateSpline(inputwavelengths, fluxdensities, s=0, k=3)
for newpixel in range(outputfluxes.size):
if inputedges[0] <= outputwavelengths[newpixel] <= inputedges[-1]:
outputlowerlimit = outputedges[newpixel]
outputupperlimit = outputedges[newpixel + 1]
outputfluxes[newpixel] = fluxspline.integral(outputedges[newpixel], outputedges[newpixel + 1])
if firstnew == None:
firstnew = outputfluxes[newpixel]
if ratio:
outputfluxes[newpixel] = outputfluxes[newpixel] / (outputupperlimit - outputlowerlimit)
elif outputwavelengths[newpixel] > inputwavelengths[-1] and lastnew == None:
lastnew = outputfluxes[newpixel - 1]
if outside == None:
for newpixel in range(outputfluxes.size):
if outputwavelengths[newpixel] < inputwavelengths[0]:
outputfluxes[newpixel] = firstnew
elif outputwavelengths[newpixel] > inputwavelengths[-1]:
outputfluxes[newpixel] = lastnew
return outputfluxes
def lineartolog(inputwavelengths, inputfluxes, outside=0, function='spline', ratio=False, logDispersion=0):
inputedges = np.empty(inputwavelengths.size + 1)
inputedges[1:-1] = (inputwavelengths[1:] + inputwavelengths[:-1]) / 2
inputedges[0] = 3 * inputwavelengths[0] / 2 - inputwavelengths[1] / 2
inputedges[-1] = 3 * inputwavelengths[-1] / 2 - inputwavelengths[-2] / 2
inputdispersions = inputedges[1:] - inputedges[:-1]
if logDispersion:
outputedges = np.arange(np.log10(inputedges[0]), np.log10(inputedges[-1]), logDispersion)
outputwavelengths = (outputedges[:-1] + outputedges[1:]) / 2
outputedges = 10**outputedges
outputwavelengths = 10**outputwavelengths
else:
outputedges = np.logspace(np.log10(inputedges[0]), np.log10(inputedges[-1]), inputedges.size)
outputwavelengths = (outputedges[:-1] * outputedges[1:])**.5
return outputwavelengths, interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
def logtolinear(inputwavelengths, inputfluxes, outside=0, function='spline', ratio=False):
logWavelengths = np.log10(inputwavelengths)
inputedges = np.empty(logWavelengths.size + 1)
inputedges[1:-1] = (logWavelengths[1:] + logWavelengths[:-1]) / 2
inputedges[0] = 3 * logWavelengths[0] / 2 - logWavelengths[1] / 2
inputedges[-1] = 3 * logWavelengths[-1] / 2 - logWavelengths[-2] / 2
inputedges = 10**inputedges
inputdispersions = inputedges[1:] - inputedges[:-1]
outputedges = np.linspace(inputedges[0], inputedges[-1], inputedges.size)
outputwavelengths = (outputedges[:-1] + outputedges[1:]) / 2
return outputwavelengths, interp(inputwavelengths, inputfluxes, inputedges, inputdispersions, outputwavelengths, outputedges, outside, function, ratio)
#plt.show()
| bsd-3-clause |
mikechan0731/tunnel_calculation | forTR_ver4.py | 1 | 12948 | # C:\Python27\Scripts
# -*- coding: utf-8 -*-
# Author : MikeChan
# Email : [email protected]
import pandas as pd
import numpy as np
from scipy import optimize
import xlrd, os
from time import sleep, time
import matplotlib.pyplot as plt
import FileDialog
#===== helper func. =====
def draw_parsley_ver4(t=0.05):
print " " *2 + " " + " _ " + " " + " " *2
sleep(t)
print " _/\_ " *2 + " " + " |_| " + " " + " _/\_ " *2
sleep(t)
print " __\ /__ " *2 + " " + " |:| " + " " + " __\ /__ " *2
sleep(t)
print " <_ _> " *2 + " " + " |:| " + " " + " <_ _> " *2
sleep(t)
print " |/ )\| " *2 + " " + " \:/ " + " " + " |/ )\| " *2
sleep(t)
print " / " *2 + " " + " | " + " " + " / " *2
sleep(t)
print u" = = = = = = = = = = = = = = = = = = = = = = = = = = = "
sleep(t)
print u" || 香菜轉檔(加香腸) version 4.0 || "
sleep(t)
print u" = = = = = = = = = = = = = = = = = = = = = = = = = = = "
def draw_parsley_ver3(t=0.05):
print " _/\_ " *5
sleep(t)
print " __\ /__ " *5
sleep(t)
print " <_ _> " *5
sleep(t)
print " |/ )\| " *5
sleep(t)
print " / " *5
sleep(t)
print u" = = = = = = = = = = = = = = = = = = = = = = = = = = = "
sleep(t)
print u" || 香菜轉檔 version 4.0 || "
sleep(t)
print u" = = = = = = = = = = = = = = = = = = = = = = = = = = = "
def draw_parsley_ver2(t=0.05):
#===== draw terminal =====
print u" .k. "
sleep(t)
print u" 2 "
sleep(t)
print u" U7 u@r "
sleep(t)
print u" :MNvGE@EU 7.LNO "
sleep(t)
print u" OMJXGOG@ .@G8Gui5L "
sleep(t)
print u" r@O8kr. M8kvYNOPYi "
sleep(t)
print u" , i [email protected]@GXSSNEZXM@O. "
sleep(t)
print u" ri@@@7 LJ..N@FvvUNNqZujBNj5. "
sleep(t)
print u" .LqM0X0@@r v :i @GP52:OOOL "
sleep(t)
print u" @@@@B8MMNEMGEi:@ . qMNM@q "
sleep(t)
print u" uJuGqvr825@@@@@ . E1v J:Pk@J iUL "
sleep(t)
print u" .F1121OM@:;@@@, .@ :BkSq5vYBOLv@ur "
sleep(t)
print u" 8@@@M8ZNM@0 Y, Mu rMFkuS001vS8SMZ "
sleep(t)
print u" :J@B0BOOE7 LMv k@8FuPXkSPZGM1 "
sleep(t)
print u" M@@@G 7Nr7@5NGOEXSur : "
sleep(t)
print u" P@F rGMNkjqPO MO;iMNXN0Fi2SEMU: "
sleep(t)
print u" :@051S1jFY51.5U5UkqZqF5kv "
sleep(t)
print u" :@MOOqBB vGqEE8PkPqFu7 "
sleep(t)
print u" 58@@: rq@@@@EOkFq0OZ "
sleep(t)
print u" i: :i@0:@@5ZBB@O "
sleep(t)
print u" ,ui2k "
sleep(t)
print u" = = = = = = = = = = = = = = = = = = = = = = = = = = = "
sleep(t)
print u" || 香菜轉檔 version 3.0 || "
sleep(t)
print u" = = = = = = = = = = = = = = = = = = = = = = = = = = = "
sleep(t)
def read_dir_file(path):
print u"共 %d 筆檔案" % len(os.listdir(path.rstrip()))
count = 0
print os.listdir(path.rstrip())
for f in os.listdir(path.rstrip()):
count += 1
read_no_title_data_and_generate_center_file(path.rstrip().rstrip()+ "\\" + f)
calc_r_and_theta_from_file(path.rstrip().rstrip()+ "\\" + f)
transfrom_single_file(path.rstrip().rstrip()+ "\\" + f)
print u"第 %d 筆檔案完成." %count
return
def circle_fit(lidar_abs_e_arr, lidar_abs_n_arr):
def calc_R(x,y, xc, yc):
""" calculate the distance of each 2D points from the center (xc, yc) """
return np.sqrt((x-xc)**2 + (y-yc)**2)
def f(c, x, y):
""" calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc) """
Ri = calc_R(x, y, *c)
return Ri - Ri.mean()
def leastsq_circle(x,y):
# coordinates of the barycenter
x_m = np.mean(x)
y_m = np.mean(y)
center_estimate = x_m, y_m
center, ier = optimize.leastsq(f, center_estimate, args=(x,y))
xc, yc = center
Ri = calc_R(x, y, *center)
R = Ri.mean()
residu = np.sum((Ri - R)**2)
return xc, yc, R, residu
xc,yc,R,residu = leastsq_circle(lidar_abs_e_arr, lidar_abs_n_arr)
return xc,yc,R,residu
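# e.g. xc, yc, R, residu = circle_fit(pts_e, pts_n) for two equal-length 1-D arrays of planar coordinates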
def read_no_title_data_and_generate_center_file(file_name):
print u"讀取無標題檔案..."
ori_f = pd.read_excel(file_name, header=None)
new_df = pd.DataFrame({"lidar_e": ori_f[0],"lidar_n": ori_f[1],"lidar_z": ori_f[2]})
new_df_lenth = new_df["lidar_e"].size
if new_df_lenth >= 10000:
fit_len = 10000
else:
fit_len = new_df_lenth
    # randomly sample fit_len points from the full point cloud for the circle fit
lidar_for_fit = new_df.sample(n=fit_len)
tunnel_z = lidar_for_fit['lidar_z'].mean()
print u"計算擬合圓心中..."
xc,yc,R,residu = circle_fit(lidar_for_fit['lidar_e'], lidar_for_fit['lidar_n'])
center_df = pd.DataFrame({'tunnel_e': xc, 'tunnel_n':yc, 'tunnel_z': tunnel_z},index=[0])
print u"擬合圓心計算完成."
all_df = pd.concat([new_df,center_df], axis=1)
all_df.to_csv('%s_FIT.csv' %file_name.rstrip(),index=False)
print u"_FIT.csv 產出."
def calc_r_and_theta_from_file(file_name):
print u"_FITcsv 檔案讀取中..."
ori_f = pd.read_csv('%s_FIT.csv' %file_name.rstrip())
print u"計算角度與半徑..."
data_length = len(ori_f["lidar_e"])
print u"共 %d 筆資料準備計算."%(data_length)
tunnel_e = ori_f['tunnel_e'][0]
tunnel_n = ori_f['tunnel_n'][0]
tunnel_z = ori_f['tunnel_z'][0]
r_theta_dict = {'radius':[float(i) for i in range(data_length)], 'theta':[float(i) for i in range(data_length)]}
r_theta_df = pd.DataFrame(r_theta_dict)
#print r_theta_df.head()
    # create a dataframe like:
    #       radius     theta
    # 0      0.0      0.0
    # 1      1.0      1.0
    # 2      2.0      2.0
print u"計算 radius...."
    # compute the radius for each index
r_theta_df['radius'] = ( (ori_f['lidar_e'] - tunnel_e)**2 + (ori_f['lidar_n'] - tunnel_n)**2 ) ** 0.5
#print r_theta_df.head()
    # the dataframe now looks like:
# radius theta
# 0 5.425958 0.0
# 1 5.442800 1.0
# 2 5.438896 2.0
# 3 5.439481 3.0
print u"radius 計算完畢."
print u"計算 theta..."
for i in range(data_length):
if i%10000 ==0: print u"共 %d 筆完成尚餘 %d 筆."% (i, data_length-i)
x = float(ori_f['lidar_e'][i] - tunnel_e)
y = float(ori_f['lidar_n'][i] - tunnel_n)
r = float(r_theta_df['radius'][i])
        if x == 0 and y == 0: # point coincides with the fitted center
r_theta_df['theta'][i] = 'nan'
elif x == 0 and y > 0: # x=0 y>0
r_theta_df['theta'][i] = 0
elif x == 0 and y < 0: # x=0 y<0
r_theta_df['theta'][i] = 180
elif x > 0 and y== 0: # x>0 y=0
r_theta_df['theta'][i] = 90
elif x < 0 and y == 0: # x<0 y=0
r_theta_df['theta'][i] = 270
# 1st quadrant
elif x > 0 and y > 0:
i_theta = np.rad2deg(np.arctan(np.abs(y)/np.abs(x)))
r_theta_df['theta'][i] = 90 - i_theta
# 2nd quadrant
elif x > 0 and y < 0:
i_theta = np.rad2deg(np.arctan(np.abs(y)/np.abs(x)))
r_theta_df['theta'][i] = i_theta + 90.0
# 3rd quadrant
elif x < 0 and y < 0:
i_theta = np.rad2deg(np.arctan(np.abs(y)/np.abs(x)))
r_theta_df['theta'][i] = (90- i_theta) + 180.0
# 4th quadrant
elif x < 0 and y > 0:
i_theta = np.rad2deg(np.arctan(np.abs(y)/np.abs(x)))
r_theta_df['theta'][i] = i_theta +270.0
else:
print "data error %s: row %d can't be classify by quadrant, deg=nan." %(file_name, i+2)
r_theta_df['theta'][i] = i_theta
with open("Error_Log.txt","a+") as err_log:
err_log.write("Data Error %s: row %d can't be classify by quadrant, deg=nan.\n" %(file_name, i+2))
print u"theta 計算完畢."
df_all = pd.concat([ori_f,r_theta_df], axis=1)
print u"_RESULT.csv 產出."
df_all.to_csv('%s_RESULT.csv' %file_name.rstrip(), index=False)
return
def transfrom_single_file(file_name):
print u"計算每一度的平均半徑..."
#===== open file =====
ori_f = pd.read_csv('%s_RESULT.csv' %file_name.rstrip())
#===== variable =====
data_len = ori_f["radius"].size
arr = []
#===== helper func. =====
data_dict = {}
for i in range(360):
data_dict[str("%s")%i] = []
#===== main =====
for i in range(data_len):
try:
now_theta = int(round(ori_f[u'theta'][i]))
except:
print u"!!DATA MISSING!!%s: missing at row %d " %(file_name, i+2)
with open("Error_Log.txt","a+") as err_log:
print >>err_log, u"!!DATA MISSING!!%s: missing at row %d " %(file_name, i+2)
continue
if now_theta ==360:
data_dict['0'].append(ori_f['radius'][i])
else:
data_dict[str(now_theta)].append(ori_f['radius'][i])
for key in data_dict:
if len(data_dict[key]) ==0:
deg_meanR = 'nan'
else:
deg_meanR = float(sum(data_dict[key])/len(data_dict[key]))
arr.append([int(key), int(len(data_dict[key])), float(deg_meanR)])
deg = [i[0] for i in arr]
num = [i[1] for i in arr]
deg_meanR =[i[2] for i in arr]
df = pd.DataFrame({'deg': deg, 'num': num, 'deg_meanR': deg_meanR})
sorted_df = df.sort_values(by='deg')
print u"平均半徑計算完畢."
#print sorted_df.head()
sorted_df.loc[sorted_df['num'] <=10, 'deg_meanR' ] = ''
new_fn = file_name
sorted_df.to_csv('%s_ANSWER.csv' %new_fn.rstrip(), index=False)
print u"_ANSWER.CSV 產出."
return
def plot_or_not(file_name):
answer_data = pd.read_csv(file_name.rstrip())
    theta = np.deg2rad(answer_data['deg'])
radii = answer_data['deg_meanR']
ax = plt.subplot(111, projection='polar')
ax.plot(theta, radii, color='r', linewidth='3')
ax.grid(True)
ax.set_rmax(6.0)
ax.set_rmin(4.0)
ax.set_theta_zero_location('N')
ax.set_theta_direction('clockwise')
plt.show()
#===== main =====
def main():
STATUS_KEY = -1
    # -1 => just started; 1 => input is a file; 2 => input is a directory; 9 => plotting mode
draw_parsley_ver4()
while 1:
read_input_path = raw_input(u"Input File dir or name: ")
if read_input_path == "pp":
print u"開啟繪圖模式"
STATUS_KEY = 9
break
elif os.path.isdir(read_input_path):
print u"取得資料夾位置,進行批次處理作業."
STATUS_KEY = 2
break
elif os.path.isfile(read_input_path):
print u"取得檔案位置,進行單一檔案轉換."
STATUS_KEY = 1
break
else:
print u"輸入錯誤,請重新選擇."
continue
if STATUS_KEY == 1:
print u"計算中..."
read_no_title_data_and_generate_center_file(read_input_path)
calc_r_and_theta_from_file(read_input_path)
transfrom_single_file(read_input_path)
print u"完成."
elif STATUS_KEY == 2:
read_dir_file(read_input_path)
print u"全部完成."
elif STATUS_KEY == 9:
draw_data_name = raw_input("Input _ANSWER.csv file: ")
try:
plot_or_not(draw_data_name)
print u"完成."
except:
print u"檔案錯誤,處罰你等待 3 秒,好好思考人生吧!"
sleep(3)
exit()
else:
print "Error operation!"
if __name__ == "__main__": main()
| apache-2.0 |
jmsolano/picongpu | examples/ThermalTest/tools/dispersion.py | 11 | 2689 | #!/usr/bin/env python
#
# Copyright 2013 Heiko Burau, Axel Huebl
#
# This file is part of PIConGPU.
#
# PIConGPU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PIConGPU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PIConGPU.
# If not, see <http://www.gnu.org/licenses/>.
#
#___________P A R A M E T E R S___________
omega_plasma = 6.718e13 # SI unit: 1/s
v_th = 1.0e8 # SI unit: m/s
c = 2.9979e8 # SI unit: m/s
delta_t = 2.5e-15 # SI unit: s
delta_z = c * delta_t # SI unit: m
#_________________________________________
from numpy import *
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
data_trans = loadtxt("eField_zt_trans.dat")
data_long = loadtxt("eField_zt_long.dat")
N_z = len(data_trans[:,0])
N_t = len(data_trans[0,:])
omega_max = pi*(N_t-1)/(N_t*delta_t)/omega_plasma
k_max = pi * (N_z-1)/(N_z*delta_z)
# __________________transversal plot______________________
ax = plt.subplot(211, autoscale_on=False, xlim=(-k_max, k_max), ylim=(-1, 10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2e'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
plt.xlabel(r"$k [1/m]$")
plt.ylabel(r"$\omega / \omega_{pe} $")
data_trans = fft.fftshift(fft.fft2(data_trans))
plt.imshow(abs(data_trans), extent=(-k_max, k_max, -omega_max, omega_max), aspect='auto', interpolation='nearest')
plt.colorbar()
# plot analytical dispersion relation
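# transversal (electromagnetic) branch: omega^2 = omega_pe^2 + c^2 k^2, normalized by omega_pe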
x = linspace(-k_max, k_max, 200)
y = sqrt(c**2 * x**2 + omega_plasma**2)/omega_plasma
plt.plot(x, y, 'r--', linewidth=1)
# ___________________longitudinal plot_____________________
ax = plt.subplot(212, autoscale_on=False, xlim=(-k_max, k_max), ylim=(-1, 10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2e'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
plt.xlabel(r"$k [1/m]$")
plt.ylabel(r"$\omega / \omega_{pe} $")
data_long = fft.fftshift(fft.fft2(data_long))
plt.imshow(abs(data_long), extent=(-k_max, k_max, -omega_max, omega_max), aspect='auto', interpolation='nearest')
plt.colorbar()
# plot analytical dispersion relation
x = linspace(-k_max, k_max, 200)
y = sqrt(3 * v_th**2 * x**2 + omega_plasma**2)/omega_plasma
plt.plot(x, y, 'r--', linewidth=1)
plt.show()
| gpl-3.0 |
chenyyx/scikit-learn-doc-zh | examples/en/feature_selection/plot_select_from_model_boston.py | 146 | 1527 | """
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| gpl-3.0 |
themrmax/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 58 | 1049 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM of the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
| bsd-3-clause |
rileyrustad/pdxapartmentfinder | pipeline/crawler.py | 1 | 2738 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 10:59:34 2016
@author: Riley Rustad <[email protected]>
This Script is designed to scrape data from Multnomah County apartment ads
from Craigslist.
"""
# =============================================================================
# Imports
import numpy as np
import os.path
from bs4 import BeautifulSoup
import requests
import time
import random
import datetime
import json
from pandas import DataFrame
import scrape
import status
from datetime import date, datetime
# =============================================================================
filepath = 'data/MasterApartmentData.json'
# Check if file exists, and if it does, load that data
if os.path.isfile(filepath) == True:
f = open(filepath)
my_dict = json.load(f)
f.close()
# If the file doesn't exist, create that file.
else:
f = open(filepath,'w')
f.close()
my_dict = {}
print str(len(my_dict) )+" existing scraped listings."
def merge_two_dicts(x, y):
'''Merges two dictionaries together'''
z = x.copy()
z.update(y)
return z
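# e.g. merge_two_dicts({'a': 1}, {'b': 2}) -> {'a': 1, 'b': 2}; values from y win on duplicate keys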
unexplored_id_numbers = []
newdict = {}
page_numbers = ['']+["?s='"+str(x+1)+'00' for x in range(24)]
print "Searching for new listings..."
# Collect all of the unexplored ID numbers.
for it, page in enumerate(page_numbers):
unexplored_id_numbers, my_dict = scrape.numbers(unexplored_id_numbers,my_dict,page)
status.printProgress((it+1), len(page_numbers),
prefix = 'Progress:', suffix = 'Complete',
decimals = 2, barLength = 25)
# Sleep at random intervals so that craigslist doesn't disconnect
time.sleep(random.randrange(1,2))
new_numbers = len(unexplored_id_numbers)
print str(new_numbers)+" new listings found"
print ""
print "Scraping info from new listings..."
# Scrape new listings
while len(unexplored_id_numbers)>0:
id_number = unexplored_id_numbers.pop(-1)
it = new_numbers - len(unexplored_id_numbers)
status.printProgress(it, new_numbers, prefix = 'Progress:',
suffix = 'Complete', decimals = 2, barLength = 50)
# Get info for listing
newdict = scrape.info(id_number,newdict)
# Sleep at random intervals so that craigslist doesn't disconnect
time.sleep(random.randrange(1, 2))
# Save the Data
print str(len(newdict))+' new listings scraped'
TodayData = open('data/TodaysData/TodaysData'+str(date)+'.json',"w")
MasterData = open('data/MasterApartmentData.json',"w")
json.dump(newdict,TodayData)
my_dict = merge_two_dicts(my_dict,newdict)
json.dump(my_dict, MasterData)
print "Total number of listings scraped is now "+str(len(my_dict))
TodayData.close()
MasterData.close()
| mit |
logpai/logparser | logparser/Drain/Drain.py | 1 | 12453 | """
Description : This file implements the Drain algorithm for log parsing
Author : LogPAI team
License : MIT
"""
import re
import os
import numpy as np
import pandas as pd
import hashlib
from datetime import datetime
class Logcluster:
def __init__(self, logTemplate='', logIDL=None):
self.logTemplate = logTemplate
if logIDL is None:
logIDL = []
self.logIDL = logIDL
class Node:
def __init__(self, childD=None, depth=0, digitOrtoken=None):
if childD is None:
childD = dict()
self.childD = childD
self.depth = depth
self.digitOrtoken = digitOrtoken
class LogParser:
def __init__(self, log_format, indir='./', outdir='./result/', depth=4, st=0.4,
maxChild=100, rex=[], keep_para=True):
"""
Attributes
----------
rex : regular expressions used in preprocessing (step1)
path : the input path stores the input log file name
depth : depth of all leaf nodes
st : similarity threshold
maxChild : max number of children of an internal node
logName : the name of the input file containing raw log messages
savePath : the output path stores the file containing structured logs
"""
self.path = indir
self.depth = depth - 2
self.st = st
self.maxChild = maxChild
self.logName = None
self.savePath = outdir
self.df_log = None
self.log_format = log_format
self.rex = rex
self.keep_para = keep_para
def hasNumbers(self, s):
return any(char.isdigit() for char in s)
def treeSearch(self, rn, seq):
retLogClust = None
seqLen = len(seq)
if seqLen not in rn.childD:
return retLogClust
parentn = rn.childD[seqLen]
currentDepth = 1
for token in seq:
if currentDepth >= self.depth or currentDepth > seqLen:
break
if token in parentn.childD:
parentn = parentn.childD[token]
elif '<*>' in parentn.childD:
parentn = parentn.childD['<*>']
else:
return retLogClust
currentDepth += 1
logClustL = parentn.childD
retLogClust = self.fastMatch(logClustL, seq)
return retLogClust
def addSeqToPrefixTree(self, rn, logClust):
seqLen = len(logClust.logTemplate)
if seqLen not in rn.childD:
            firstLayerNode = Node(depth=1, digitOrtoken=seqLen)
            rn.childD[seqLen] = firstLayerNode
        else:
            firstLayerNode = rn.childD[seqLen]
        parentn = firstLayerNode
currentDepth = 1
for token in logClust.logTemplate:
#Add current log cluster to the leaf node
if currentDepth >= self.depth or currentDepth > seqLen:
if len(parentn.childD) == 0:
parentn.childD = [logClust]
else:
parentn.childD.append(logClust)
break
#If token not matched in this layer of existing tree.
if token not in parentn.childD:
if not self.hasNumbers(token):
if '<*>' in parentn.childD:
if len(parentn.childD) < self.maxChild:
newNode = Node(depth=currentDepth + 1, digitOrtoken=token)
parentn.childD[token] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if len(parentn.childD)+1 < self.maxChild:
newNode = Node(depth=currentDepth+1, digitOrtoken=token)
parentn.childD[token] = newNode
parentn = newNode
elif len(parentn.childD)+1 == self.maxChild:
newNode = Node(depth=currentDepth+1, digitOrtoken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
else:
if '<*>' not in parentn.childD:
newNode = Node(depth=currentDepth+1, digitOrtoken='<*>')
parentn.childD['<*>'] = newNode
parentn = newNode
else:
parentn = parentn.childD['<*>']
#If the token is matched
else:
parentn = parentn.childD[token]
currentDepth += 1
#seq1 is template
def seqDist(self, seq1, seq2):
assert len(seq1) == len(seq2)
simTokens = 0
numOfPar = 0
for token1, token2 in zip(seq1, seq2):
if token1 == '<*>':
numOfPar += 1
continue
if token1 == token2:
simTokens += 1
retVal = float(simTokens) / len(seq1)
return retVal, numOfPar
def fastMatch(self, logClustL, seq):
retLogClust = None
maxSim = -1
maxNumOfPara = -1
maxClust = None
for logClust in logClustL:
curSim, curNumOfPara = self.seqDist(logClust.logTemplate, seq)
if curSim>maxSim or (curSim==maxSim and curNumOfPara>maxNumOfPara):
maxSim = curSim
maxNumOfPara = curNumOfPara
maxClust = logClust
if maxSim >= self.st:
retLogClust = maxClust
return retLogClust
def getTemplate(self, seq1, seq2):
assert len(seq1) == len(seq2)
retVal = []
i = 0
for word in seq1:
if word == seq2[i]:
retVal.append(word)
else:
retVal.append('<*>')
i += 1
return retVal
def outputResult(self, logClustL):
log_templates = [0] * self.df_log.shape[0]
log_templateids = [0] * self.df_log.shape[0]
df_events = []
for logClust in logClustL:
template_str = ' '.join(logClust.logTemplate)
occurrence = len(logClust.logIDL)
template_id = hashlib.md5(template_str.encode('utf-8')).hexdigest()[0:8]
for logID in logClust.logIDL:
logID -= 1
log_templates[logID] = template_str
log_templateids[logID] = template_id
df_events.append([template_id, template_str, occurrence])
df_event = pd.DataFrame(df_events, columns=['EventId', 'EventTemplate', 'Occurrences'])
self.df_log['EventId'] = log_templateids
self.df_log['EventTemplate'] = log_templates
if self.keep_para:
self.df_log["ParameterList"] = self.df_log.apply(self.get_parameter_list, axis=1)
self.df_log.to_csv(os.path.join(self.savePath, self.logName + '_structured.csv'), index=False)
occ_dict = dict(self.df_log['EventTemplate'].value_counts())
df_event = pd.DataFrame()
df_event['EventTemplate'] = self.df_log['EventTemplate'].unique()
df_event['EventId'] = df_event['EventTemplate'].map(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()[0:8])
df_event['Occurrences'] = df_event['EventTemplate'].map(occ_dict)
df_event.to_csv(os.path.join(self.savePath, self.logName + '_templates.csv'), index=False, columns=["EventId", "EventTemplate", "Occurrences"])
def printTree(self, node, dep):
pStr = ''
for i in range(dep):
pStr += '\t'
if node.depth == 0:
pStr += 'Root'
elif node.depth == 1:
pStr += '<' + str(node.digitOrtoken) + '>'
else:
pStr += node.digitOrtoken
print(pStr)
if node.depth == self.depth:
return 1
for child in node.childD:
self.printTree(node.childD[child], dep+1)
def parse(self, logName):
print('Parsing file: ' + os.path.join(self.path, logName))
start_time = datetime.now()
self.logName = logName
rootNode = Node()
logCluL = []
self.load_data()
count = 0
for idx, line in self.df_log.iterrows():
logID = line['LineId']
logmessageL = self.preprocess(line['Content']).strip().split()
# logmessageL = filter(lambda x: x != '', re.split('[\s=:,]', self.preprocess(line['Content'])))
matchCluster = self.treeSearch(rootNode, logmessageL)
#Match no existing log cluster
if matchCluster is None:
newCluster = Logcluster(logTemplate=logmessageL, logIDL=[logID])
logCluL.append(newCluster)
self.addSeqToPrefixTree(rootNode, newCluster)
#Add the new log message to the existing cluster
else:
newTemplate = self.getTemplate(logmessageL, matchCluster.logTemplate)
matchCluster.logIDL.append(logID)
if ' '.join(newTemplate) != ' '.join(matchCluster.logTemplate):
matchCluster.logTemplate = newTemplate
count += 1
if count % 1000 == 0 or count == len(self.df_log):
print('Processed {0:.1f}% of log lines.'.format(count * 100.0 / len(self.df_log)))
if not os.path.exists(self.savePath):
os.makedirs(self.savePath)
self.outputResult(logCluL)
print('Parsing done. [Time taken: {!s}]'.format(datetime.now() - start_time))
def load_data(self):
headers, regex = self.generate_logformat_regex(self.log_format)
self.df_log = self.log_to_dataframe(os.path.join(self.path, self.logName), regex, headers, self.log_format)
def preprocess(self, line):
for currentRex in self.rex:
line = re.sub(currentRex, '<*>', line)
return line
def log_to_dataframe(self, log_file, regex, headers, logformat):
""" Function to transform log file to dataframe
"""
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
pass
logdf = pd.DataFrame(log_messages, columns=headers)
logdf.insert(0, 'LineId', None)
logdf['LineId'] = [i + 1 for i in range(linecount)]
return logdf
def generate_logformat_regex(self, logformat):
""" Function to generate regular expression to split log messages
"""
headers = []
splitters = re.split(r'(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if k % 2 == 0:
splitter = re.sub(' +', '\\\s+', splitters[k])
regex += splitter
else:
header = splitters[k].strip('<').strip('>')
regex += '(?P<%s>.*?)' % header
headers.append(header)
regex = re.compile('^' + regex + '$')
return headers, regex
def get_parameter_list(self, row):
template_regex = re.sub(r"<.{1,5}>", "<*>", row["EventTemplate"])
if "<*>" not in template_regex: return []
template_regex = re.sub(r'([^A-Za-z0-9])', r'\\\1', template_regex)
template_regex = re.sub(r'\\ +', r'\s+', template_regex)
template_regex = "^" + template_regex.replace("\<\*\>", "(.*?)") + "$"
parameter_list = re.findall(template_regex, row["Content"])
parameter_list = parameter_list[0] if parameter_list else ()
parameter_list = list(parameter_list) if isinstance(parameter_list, tuple) else [parameter_list]
return parameter_list | mit |
phobson/statsmodels | statsmodels/datasets/co2/data.py | 3 | 3045 | #! /usr/bin/env python
"""Mauna Loa Weekly Atmospheric CO2 Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = """Mauna Loa Weekly Atmospheric CO2 Data"""
SOURCE = """
Data obtained from http://cdiac.ornl.gov/trends/co2/sio-keel-flask/sio-keel-flaskmlo_c.html
Obtained on 3/15/2014.
Citation:
Keeling, C.D. and T.P. Whorf. 2004. Atmospheric CO2 concentrations derived from flask air samples at sites in the SIO network. In Trends: A Compendium of Data on Global Change. Carbon Dioxide Information Analysis Center, Oak Ridge National Laboratory, U.S. Department of Energy, Oak Ridge, Tennessee, U.S.A.
"""
DESCRSHORT = """Atmospheric CO2 from Continuous Air Samples at Mauna Loa Observatory, Hawaii, U.S.A."""
DESCRLONG = """
Atmospheric CO2 from Continuous Air Samples at Mauna Loa Observatory, Hawaii, U.S.A.
Period of Record: March 1958 - December 2001
Methods: An Applied Physics Corporation (APC) nondispersive infrared gas analyzer was used to obtain atmospheric CO2 concentrations, based on continuous data (four measurements per hour) from atop intake lines on several towers. Steady data periods of not less than six hours per day are required; if no such six-hour periods are available on any given day, then no data are used that day. Weekly averages were calculated for most weeks throughout the approximately 44 years of record. The continuous data for year 2000 is compared with flask data from the same site in the graphics section."""
#suggested notes
NOTE = """::
Number of observations: 2225
Number of variables: 2
Variable name definitions:
date - sample date in YYMMDD format
co2 - CO2 Concentration ppmv
The data returned by load_pandas contains the dates as the index.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
import pandas as pd
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
names = data.dtype.names
return du.Dataset(data=data, names=names)
def load_pandas():
data = load()
# pandas <= 0.12.0 fails in the to_datetime regex on Python 3
index = pd.DatetimeIndex(start=data.data['date'][0].decode('utf-8'),
periods=len(data.data), format='%Y%m%d',
freq='W-SAT')
dataset = pd.DataFrame(data.data['co2'], index=index, columns=['co2'])
#NOTE: this is how I got the missing values in co2.csv
#new_index = pd.DatetimeIndex(start='1958-3-29', end=index[-1],
# freq='W-SAT')
#data.data = dataset.reindex(new_index)
data.data = dataset
return data
def _get_data():
filepath = dirname(abspath(__file__))
with open(filepath + '/co2.csv', 'rb') as f:
data = np.recfromtxt(f, delimiter=",", names=True, dtype=['a8', float])
return data
| bsd-3-clause |
abhisg/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
openmichigan/metrics_tools | openmichigan-metrics-pdf/ga_api_timeseries.py | 1 | 20456 | import sys
import infofile
import requests, json
import get_material_links
from pylab import * #?
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import date, timedelta
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
import httplib2
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
# structural stuff, TODO
# generalization; TODO
def get_country(city_name):
baseurl = "http://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false" % city_name
r = requests.get(baseurl)
d = json.loads(r.text)
if "country" in d["results"][0]["address_components"][-1]["types"]:
country = d["results"][0]["address_components"][-1]["short_name"]
else:
country = d["results"][0]["address_components"][-2]["short_name"]
return country
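# e.g. get_country("Ann Arbor") typically returns "US" (depends on the Maps geocoding response)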
class GoogleAnalyticsData(object):
def __init__(self, days_back=30):
self.days_back = days_back
self.CLIENT_SECRETS = 'client_secrets.json'
# helpful msg if it's missing
self.MISSING_CLIENT_SECRETS_MSG = '%s is missing' % self.CLIENT_SECRETS
self.paramlist = [int(infofile.profileid),infofile.pgpath] # should this be here or in overall file??
# flow object to be used if we need to authenticate (this remains a bit of a problem in some cases)
self.FLOW = flow_from_clientsecrets(self.CLIENT_SECRETS, scope='https://www.googleapis.com/auth/analytics.readonly', message=self.MISSING_CLIENT_SECRETS_MSG)
# a file to store the access token
self.TOKEN_FILE_NAME = 'analytics.dat' # should be stored in a SECURE PLACE
def proper_start_date(self):
"""Gets accurate date in YYYY-mm-dd format that is default 30 (or, however many specified) days earlier than current day"""
d = date.today() - timedelta(days=self.days_back)
return str(d)
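    # e.g. with days_back=30 and today() == 2014-05-01 this returns '2014-04-01'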
def prepare_credentials(self):
# get existing creds
storage = Storage(self.TOKEN_FILE_NAME)
credentials = storage.get()
# if existing creds are invalid and Run Auth flow
# run method will store any new creds
if credentials is None or credentials.invalid:
credentials = run(self.FLOW, storage)
return credentials
def initialize_service(self):
http = httplib2.Http()
credentials = self.prepare_credentials()
http = credentials.authorize(http) # authorize the http obj
return build('analytics', 'v3', http=http)
def deal_with_results(self, res):
"""Handles results gotten from API and formatted, plots them with matplotlib tools and saves plot img"""
view_nums = [x[1] for x in res] # y axis
date_strs = [mdates.datestr2num(x[0]) for x in res]
fig, ax = plt.subplots(1)
ax.plot_date(date_strs, view_nums, fmt="g-")
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
total = sum(view_nums)
plt.title("%d total Course Views over past %s days" % (total, len(date_strs)-1)) # should get title of course
#plt.text(3,3,"TESTING ADDING A STRING THING TO PLOT PDF")
return fig
def main(self):
self.service = self.initialize_service()
try:
self.profile_id = self.paramlist[0]
if self.profile_id:
results = self.get_results(self.service, self.profile_id)
res = self.return_results(results)
except TypeError, error:
print "There was an API error: %s " % (error)
except HttpError, error:
print "There was an API error: %s " % (error)
except AccessTokenRefreshError:
print "The credentials have been revoked or expired, please re-run app to reauthorize."
except:
print "Did you provide a profile id and a path as cli arguments? (Do you need to?) Try again."
else: # should run if it did not hit an except clause
return self.deal_with_results(res)
def get_results(self, service, profile_id):
# query = service.data().ga().get(ids='ga:%s' % profile_id, start_date='2010-03-01',end_date='2013-05-15',metrics='ga:pageviews',dimensions='ga:pagePath',filters='ga:pagePath==%s' % (sys.argv[2]))
start = self.proper_start_date() # change to change num of days back
end = str(date.today())
# return query.execute()
return self.service.data().ga().get(ids='ga:%s' % (profile_id), start_date=start,end_date=end,metrics='ga:pageviews',dimensions='ga:date',sort='ga:date',filters='ga:pagePath==%s' % (self.paramlist[1])).execute()#(sys.argv[2])).execute()
def return_results(self, results):
if results:
#date_views_tup = [(str(x[0][-4:-2])+"/"+str(x[0][-2:]),int(x[1])) for x in results.get('rows')] ## altered date strs
# should be list of tuples of form: ("mm/dd", views) where views is int
date_views_tup = [(str(x[0]), int(x[1])) for x in results.get('rows')]
return date_views_tup
else:
print "No results found."
return None
def print_results(self, results):
# print data nicely for the user (may also want to pipe to a file)
## this turned into a testing fxn -- TODO decide whether/what printing is needed and change to class __str__ method
if results:
print "Profile: %s" % results.get('profileInfo').get('profileName')
#print 'Total Pageviews: %s' % results.get('rows')[0][1]
for r in results.get('rows'):
print r
else:
print "No results found."
# for modularity -- poss look @ Python print-results examples (e.g. by country or whatever) todo
class GABulkDownloads_Views(GoogleAnalyticsData):
def __init__(self, days_back=30):
self.days_back = days_back
self.CLIENT_SECRETS = 'client_secrets.json'
# helpful msg if it's missing
self.MISSING_CLIENT_SECRETS_MSG = '%s is missing' % self.CLIENT_SECRETS
## TODO need to handle non-bulk-download pages appropriately
# if self.get_bulk_dl_link() != 0:
# self.paramlist = [int(infofile.profileid),self.get_bulk_dl_link()] # needs error checking TODO
# else:
# self.paramlist = [int(infofile.profileid)]
self.paramlist = [int(infofile.profileid),self.get_bulk_dl_link()] # needs error checking TODO
self.paramlist_second = [int(infofile.profileid), infofile.pgpath]
self.FLOW = flow_from_clientsecrets(self.CLIENT_SECRETS, scope='https://www.googleapis.com/auth/analytics.readonly', message=self.MISSING_CLIENT_SECRETS_MSG)
self.TOKEN_FILE_NAME = 'analytics.dat'
def get_bulk_dl_link(self):
url = None
try:
import mechanize
br = mechanize.Browser()
except:
print "Dependency (Mechanize) not installed. Try again."
return None
else:
response = br.open("http://open.umich.edu%s" % infofile.pgpath)
for link in br.links():
if "Download all" in link.text: # depends on current page lang/phrasing
response = br.follow_link(link)
url = response.geturl()
# else:
# print "No bulk download available"
# #return 0
if url:
return url[len("http://open.umich.edu"):] # if no Download All, error -- needs checking + graceful handling
else:
return 0
def get_results_other(self, service, profile_id):
# query = service.data().ga().get(ids='ga:%s' % profile_id, start_date='2010-03-01',end_date='2013-05-15',metrics='ga:pageviews',dimensions='ga:pagePath',filters='ga:pagePath==%s' % (sys.argv[2]))
start = self.proper_start_date() # change to change num of days back
end = str(date.today())
# return query.execute()
return self.service.data().ga().get(ids='ga:%s' % (profile_id), start_date=start,end_date=end,metrics='ga:pageviews',dimensions='ga:date',sort='ga:date',filters='ga:pagePath==%s' % (self.paramlist_second[1])).execute()#(sys.argv[2])).execute()
def deal_with_results(self, res):
"""Handles results gotten from API and formatted, plots them with matplotlib tools and saves plot img"""
view_nums = [x[1] for x in res] # y axis
view_nums_orig = [x[1] for x in self.return_results(self.get_results_other(self.service,self.profile_id))] ## let's see
date_strs = [mdates.datestr2num(x[0]) for x in res] # x axis
fig, ax = plt.subplots(1)
ax.plot_date(date_strs, view_nums, fmt="b-", label="Downloads")
ax.plot_date(date_strs, view_nums_orig, fmt="g-", label="Views")
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
#total = sum(view_nums)
plt.legend(loc='upper left')
plt.title("Course Views vs Bulk Material Downloads over past %s days" % (len(date_strs)-1)) # should get title of course
#savefig('test4.png')
return fig
class GABulkDownloads(GABulkDownloads_Views):
def deal_with_results(self, res):
"""Handles results gotten from API and formatted, plots them with matplotlib tools and saves plot img"""
view_nums = [x[1] for x in res] # y axis
date_strs = [mdates.datestr2num(x[0]) for x in res] # x axis
fig, ax = plt.subplots(1)
ax.plot_date(date_strs, view_nums, fmt="b-")
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
total = sum(view_nums)
plt.title("%d total Bulk Course Material Downloads over past %s days" % (total, len(date_strs)-1)) # should get title of course
#savefig('test5.png')
#fig.show()
return fig
class GA_Text_Info(GABulkDownloads_Views):
# depends on the main fxn in GABulkDownloads_Views -- this calls deal_with_results()
def return_info(self):
"""Handles results gotten from API and formatted, returns data"""
#res = self.get_results()
self.service = self.initialize_service()
try:
self.profile_id = self.paramlist[0]
if self.profile_id:
results = self.get_results(self.service, self.profile_id)
res = self.return_results(results)
else:
print "Profile ID missing in return_info fxn"
except:
print "Error occurred."
else:
view_nums = [x[1] for x in res] # y axis
view_nums_orig = [x[1] for x in self.return_results(self.get_results_other(self.service,self.profile_id))] ## let's see
total_dls = sum(view_nums)
total_views = sum(view_nums_orig)
top_countries = self.get_more_info()
#top_resources = self.indiv_dl_nums()
# get more info with other queries? TODO
self.info_dict = {'Across time span':self.days_back, 'Total Page Views': total_views, 'Total Bulk Downloads': total_dls, 'Top Nations': top_countries} #, 'Top Resources':top_resources}
return self.info_dict # making this a class attribute so I can use it below easily
def deal_with_results(self, res):
## to be called in main -- do plots here basically
ind_res = res #self.resources_results # holding it in class structure for easy access :/ ugly terribleness a bit
files = [str(x[0].encode('utf-8')) for x in ind_res]
nums = [int(x[3].encode('utf-8')) for x in ind_res]
fig, ax = plt.subplots(1)
ax.plot(files, nums) # this as line plot doesn't make sense, each is a different plotted line, so this should be bar or should get individual bits over time (and plot each by date obviously)
plt.title("bad line chart of individual resources")
return fig
# if get_results itself changes, will have to change main() as well because return_results() depends on this being as is NOTE TODO
def get_results(self, service, profile_id):
# query = service.data().ga().get(ids='ga:%s' % profile_id, start_date='2010-03-01',end_date='2013-05-15',metrics='ga:pageviews',dimensions='ga:pagePath',filters='ga:pagePath==%s' % (sys.argv[2]))
start = self.proper_start_date() # change to change num of days back
end = str(date.today())
# return query.execute()
if self.get_bulk_dl_link() != 0:
return self.service.data().ga().get(ids='ga:%s' % (profile_id), start_date=start,end_date=end,metrics='ga:pageviews',dimensions='ga:date',sort='ga:date',filters='ga:pagePath==%s' % (self.paramlist[1])).execute()#(sys.argv[2])).execute()
#else:
# need to handle non-bulk-download links appropriately! TODO
# but a different function that will get all the infos because it will take infodict?? that is a possibility, though ugly NTS
def get_more_info_tups(self, top_what=10): # don't need to pass in infodict b/c class attr now
# dimensions=ga:country
# metrics=ga:visits
# sort=-ga:visits
self.profile_id = self.paramlist[0]
self.service = self.initialize_service()
start = self.proper_start_date()
end = str(date.today())
results = self.service.data().ga().get(ids='ga:%s' % (self.profile_id), start_date=start,end_date=end,metrics='ga:pageviews',dimensions='ga:country',sort='-ga:pageviews',filters='ga:pagePath==%s' % (self.paramlist[1])).execute()#(sys.argv[2])).execute()
if results and results.get('rows'):
# for x in results.get('rows'):
# print x
top_nations = [(x[0].encode('utf-8'), x[1].encode('utf-8')) for x in results.get('rows') if "not set" not in x[0].encode('utf-8')][:top_what]
# for x in top_nations:
# print x
return top_nations
else:
print "No results found."
return None
def get_cities_tups(self, top_what=10):
self.profile_id = self.paramlist[0]
self.service = self.initialize_service()
start = self.proper_start_date()
end = str(date.today())
results = self.service.data().ga().get(ids='ga:%s' % (self.profile_id), start_date=start,end_date=end,metrics='ga:pageviews',dimensions='ga:city',sort='-ga:pageviews',filters='ga:pagePath==%s' % (self.paramlist[1])).execute()#(sys.argv[2])).execute()
if results and results.get('rows'):
# for x in results.get('rows'):
# print x
top_cities = [(x[0].encode('utf-8'), x[1].encode('utf-8')) for x in results.get('rows') if "not set" not in x[0].encode('utf-8')][:top_what]
# for x in top_nations:
# print x
return top_cities
else:
print "No results found."
return None
def get_more_info(self, top_what=10):
self.profile_id = self.paramlist[0]
self.service = self.initialize_service()
start = self.proper_start_date()
end = str(date.today())
results = self.service.data().ga().get(ids='ga:%s' % (self.profile_id), start_date=start,end_date=end,metrics='ga:pageviews',dimensions='ga:country',sort='-ga:pageviews',filters='ga:pagePath==%s' % (self.paramlist[1])).execute()#(sys.argv[2])).execute()
if results:
# for x in results.get('rows'):
# print x
top_nations = [x[0].encode('utf-8') for x in results.get('rows') if "not set" not in x[0].encode('utf-8')][:top_what]
# for x in top_nations:
# print x
return top_nations
else:
print "No results found."
return None
## need maybe scraping-y fxn to find out what common term is in files to dl on course pg?
## OR some other sort of commonality e.g. creator?? are our naming conventions solid enough?
## (meh that's a terrible thing to depend on)
## OR list of all file links on course and check in lists -- expect performance worse but honestly... esp if monthly...
## TODO should know the DIFFERENCES between the popular individual materials and 'less so'
def indiv_dls_helper(self):
self.profile_id = self.paramlist[0]
self.service = self.initialize_service()
start = self.proper_start_date()
end = str(date.today())
resources_results = self.service.data().ga().get(ids='ga:%s' % (self.profile_id), start_date=start,end_date=end,metrics='ga:visitsWithEvent',dimensions='ga:eventLabel,ga:eventCategory,ga:eventAction',sort='-ga:visitsWithEvent').execute()#(sys.argv[2])).execute()
return resources_results.get('rows')
def indiv_dl_nums(self): # pass in string that identifies all files of certain cat (hoping there is one) -- default Dr Gunderson atm
        ## TODO except that we have a problem because the file names are unreliable, can only rely on fact that they are in the course. should extract filenames from scrapingness
# self.profile_id = self.paramlist[0]
# self.service = self.initialize_service()
# start = '2011-01-01'#self.proper_start_date()
# end = str(date.today())
results = self.indiv_dls_helper()
if results:
# for x in results.get('rows'):
# if id_string in x[0].encode('utf-8'):
# print x
course_files = get_material_links.get_material_links()
sorted_resources = sorted([x for x in results.get('rows') if x[0][21:] in course_files if int(x[3]) != 0], key=lambda x: int(x[3].encode('utf-8')), reverse=True)
top_ten_resources = sorted_resources[:10]
# for x in top_ten_resources:
# print x[0][21:].encode('utf-8')
return ["%s -- %s" % (x[0][21:].encode('utf-8'), x[3].encode('utf-8')) for x in top_ten_resources]
#print type(results)
# print results
else:
print "No results found."
return None
# def plot_indiv_dls(self):
# ind_res = self.resources_results # holding it in class structure for easy access :/ ugly terribleness a bit
def main(self):
self.service = self.initialize_service()
try:
self.profile_id = self.paramlist[0]
if self.profile_id:
#results = self.get_results(self.service, self.profile_id)
#res = self.return_results(results)
res = self.indiv_dls_helper()
except TypeError, error:
print "There was an API error: %s " % (error)
except HttpError, error:
print "There was an API error: %s " % (error)
except AccessTokenRefreshError:
print "The credentials have been revoked or expired, please re-run app to reauthorize."
except:
print "Did you provide a profile id and a path as cli arguments? (Do you need to?) Try again."
else: # should run if it did not hit an except clause
return self.deal_with_results(res)
class GA_Info_forTime(GA_Text_Info):
def hash_by_day(self):
views_day_ranges = {} # over range of past days_back number days
today = date.today()
dates_overall = []
for i in sorted(range(0,self.days_back), reverse=True):
date_to_get = today - timedelta(days=i)
            # get results and handle results for the proper get_results fxn
results = self.get_results(self.service, self.profile_id, date_to_get)
date_views_tup = [(str(x[0]), int(x[1])) for x in results.get('rows')] # this is from other return_results so it may not work
#print date_views_tup
dates_overall.append(date_views_tup) # presumably each date_views_tup will only have one elem, take out extra layer (TODO fix if this is not so)
print dates_overall
return dates_overall
    def get_results(self, service, profile_id, start): # start should be a proper start date: a SINGLE date produced by the wrapper, which steps with a timedelta from the start of the period to today
# query = service.data().ga().get(ids='ga:%s' % profile_id, start_date='2010-03-01',end_date='2013-05-15',metrics='ga:pageviews',dimensions='ga:pagePath',filters='ga:pagePath==%s' % (sys.argv[2]))
# return query.execute()
end = start #+ timedelta(days=1)
#end = date.today()
return self.service.data().ga().get(ids='ga:%s' % (profile_id), start_date=str(start),end_date=str(end),metrics='ga:pageviews',dimensions='ga:date',filters='ga:pagePath==%s' % (self.paramlist_second[1])).execute()#(sys.argv[2])).execute()
def main(self):
self.service = self.initialize_service()
try:
self.profile_id = self.paramlist[0]
if not self.profile_id:
# results = self.get_results(self.service, self.profile_id)
# res = self.return_results(results)
print "Error: missing profile ID!"
except TypeError, error:
print "There was an API error: %s " % (error)
except HttpError, error:
print "There was an API error: %s " % (error)
except AccessTokenRefreshError:
print "The credentials have been revoked or expired, please re-run app to reauthorize."
except:
print "Did you provide a profile id and a path as cli arguments? (Do you need to?) Try again."
else: # should run if it did not hit an except clause
return self.hash_by_day()
class GA_dls_forTime(GA_Info_forTime):
    def get_results(self, service, profile_id, start): # start should be a proper start date: a SINGLE date produced by the wrapper, which steps with a timedelta from the start of the period to today
# query = service.data().ga().get(ids='ga:%s' % profile_id, start_date='2010-03-01',end_date='2013-05-15',metrics='ga:pageviews',dimensions='ga:pagePath',filters='ga:pagePath==%s' % (sys.argv[2]))
# return query.execute()
end = start #+ timedelta(days=1)
#end = date.today()
return self.service.data().ga().get(ids='ga:%s' % (profile_id), start_date=str(start),end_date=str(end),metrics='ga:pageviews',dimensions='ga:date',filters='ga:pagePath==%s' % (self.paramlist[1])).execute() # paramlist holds dls, _second holds views
# everything else is the same
if __name__ == '__main__':
## TESTING (pre unit tests)
#main(sys.argv)
#main(param_list)
#print "running the right file"
a = GoogleAnalyticsData()
#print a.paramlist[0]
a.main()
c = GABulkDownloads_Views()
c.main()
b = GABulkDownloads()
#print b.paramlist
b.main()
| mit |
briney/abstar | abstar/utils/pandaseq.py | 1 | 7169 | #!/usr/bin/python
# filename: pandaseq.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import glob
import subprocess as sp
from multiprocessing import cpu_count
from abutils.utils import log
logger = log.get_logger('basespace')
def list_files(d):
return sorted([f for f in glob.glob(d + '/*') if os.path.isfile(f)])
def pair_files(files, nextseq):
pairs = {}
for f in files:
if nextseq:
f_prefix = '_'.join(os.path.basename(f).split('_')[:-2])
else:
f_prefix = '_'.join(os.path.basename(f).split('_')[:-3])
if f_prefix in pairs:
pairs[f_prefix].append(f)
else:
pairs[f_prefix] = [f, ]
return pairs
def batch_pandaseq(f, r, o, algo):
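    # PANDAseq CLI sketch (flag meanings as commonly documented; treat this as an
    # assumption rather than a full CLI reference): -f/-r forward and reverse
    # reads, -A merging algorithm, -T number of threads, -w output file, and
    # -d flags controlling the logging output.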
cmd = 'pandaseq -f {0} -r {1} -A {2} -d rbfkms -T {3} -w {4}'.format(f, r, algo, cpu_count(), o)
sp.Popen(cmd, shell=True, stderr=sp.STDOUT, stdout=sp.PIPE).communicate()
def merge_reads(files, output, algo, nextseq, i):
files.sort()
f = files[0]
r = files[1]
if nextseq:
lane = os.path.basename(f).split('_')[-3]
sample_id = '_'.join(os.path.basename(f).split('_')[:-4])
sample = sample_id + '_' + lane
else:
sample = '_'.join(os.path.basename(f).split('_')[:-4])
print_sample_info(i, sample)
o = os.path.join(output, '{}.fasta'.format(sample))
batch_pandaseq(f, r, o, algo)
return o
def print_start_info():
logger.info('')
logger.info('')
logger.info('========================================')
logger.info('Merging reads with PANDAseq')
logger.info('========================================')
logger.info('')
def print_input_info(files):
    logger.info('The input directory contains {} pair(s) of files to be merged.\n'.format(len(files) // 2))
def print_sample_info(i, sample):
logger.info('[ {} ] Merging sample {}'.format(str(i + 1), sample))
def print_sample_end():
logger.info('Done.')
def run(input, output, algorithm='simple_bayesian', nextseq=False):
'''
Merge paired-end FASTQ files with PANDAseq.
Examples:
To merge a directory of raw (gzip compressed) files from a MiSeq run::
merged_files = run('/path/to/input', '/path/to/output')
Same as above, but using the Pear_ read merging algorithm::
merged_files = run('/path/to/input', '/path/to/output', algorithm='pear')
To merge a list of file pairs::
file_pairs = [(sample1_R1.fastq, sample1_R2.fastq),
(sample2_R1.fastq.gz, sample2_R2.fastq.gz),
(sample3_R1.fastq, sample3_R2.fastq)]
merged_files = run(file_pairs, '/path/to/output')
.. _Pear: http://sco.h-its.org/exelixis/web/software/pear/
Args:
input (str, list): Input can be one of three things:
1. path to a directory of paired FASTQ files
2. a list of paired FASTQ files
3. a list of read pairs, with each read pair being a list/tuple
containing paths to two paired read files
Regardless of what input type is provided, paired FASTQ files can be either
gzip compressed or uncompressed.
When providing a list of files or a directory of files, it is assumed that
all files follow Illumina naming conventions. If your file names aren't
Illumina-like, submit your files as a list of read pairs to ensure that
the proper pairs of files are merged.
output (str): Path to an output directory, into which merged FASTQ files will be deposited.
To determine the filename for the merged file, the R1 file (or the first file in the read
            pair) is split at the first occurrence of the '_' character. Therefore, the read pair
``['my-sequences_R1.fastq', 'my-sequences_R2.fastq']`` would be merged into ``my-sequences.fasta``.
algorithm (str): PANDAseq algorithm to be used for merging reads. Choices are: 'simple_bayesian',
'ea_util', 'flash', 'pear', 'rdp_mle', 'stitch', or 'uparse'. Default is 'simple_bayesian',
which is the default PANDAseq algorithm.
nextseq (bool): Set to ``True`` if the sequencing data was generated on a NextSeq. Needed
because the naming conventions for NextSeq output files differs from MiSeq output.
Returns:
list: a list of merged file paths
'''
print_start_info()
if os.path.isdir(input):
files = list_files(input)
pairs = pair_files(files, nextseq)
elif type(input) in [list, tuple]:
if all([type(i) in [list, tuple] for i in input]) and all([len(i) == 2 for i in input]):
files = [f for sublist in input for f in sublist]
pairs = {n: i for n, i in zip(range(len(input)), input)}
elif all([os.path.isfile(i) for i in input]):
files = input
pairs = pair_files(files, nextseq)
else:
err = 'ERROR: Invalid input. Input may be one of three things:\n'
err += ' 1. a directory path\n'
err += ' 2. a list of file paths\n'
err += ' 3. a list of file pairs (lists/tuples containing exactly 2 file paths)'
raise RuntimeError(err)
else:
err = 'ERROR: Invalid input. Input may be one of three things:\n'
err += ' 1. a directory path\n'
err += ' 2. a list of file paths\n'
err += ' 3. a list of file pairs (lists/tuples containing exactly 2 file paths)'
raise RuntimeError(err)
print_input_info(files)
merged_files = []
for i, pair in enumerate(sorted(pairs.keys())):
if len(pairs[pair]) == 2:
# logger.info('Merging {} and {}'.format(pairs[pair][0], pairs[pair][1]))
mf = merge_reads(pairs[pair], output, algorithm, nextseq, i)
merged_files.append(mf)
return merged_files
| mit |
shirtsgroup/pygo | analysis/QRE_scripts/MBAR_foldingcurve.py | 1 | 6622 | # Ellen Zhong
# [email protected]
# 03/08/2014
import sys
import numpy
import pymbar # for MBAR analysis
import timeseries # for timeseries analysis
import os
import os.path
import pdb
import wham
from optparse import OptionParser
def parse_args():
parser=OptionParser()
parser.add_option("-r", "--replicas", default=24, type="int",dest="replicas", help="number of replicas (default: 24)")
parser.add_option("-n", "--N_max", default=100000, type="int",dest="N_max", help="number of data points to read in (default: 100k)")
parser.add_option("-s", "--skip", default=1, type="int",dest="skip", help="skip every n data points")
parser.add_option("--direc", dest="direc", help="Qtraj_singleprot.txt file location")
parser.add_option('-t', "--tfile", dest="tfile", default="/home/edz3fz/proteinmontecarlo/T32.txt", help="file of temperatures (default: T32.txt)")
parser.add_option('-Q', "--Qfile", dest="Qfile", default="/home/edz3fz/proteinmontecarlo/Q32.txt", help="file of Qpins (default: Q32.txt)")
parser.add_option("--k_Qpin", type="float", default=10, help="Q umbrella spring constant (default: 10)")
parser.add_option('--show', action="store_true", default=False, help="show plot at end")
(args,_) = parser.parse_args()
return args
def read_data(args, T, Q, K):
U_kn = numpy.empty([K,args.N_max/args.skip], numpy.float64)
Q_kn = numpy.empty([K,args.N_max/args.skip], numpy.float64)
print "Reading data..."
for i in range(len(T)):
suffix = '%i_%2.2f' % (int(T[i]), Q[i])
ufile = '%s/energy%s.npy' % (args.direc, suffix)
data = numpy.load(ufile)[-args.N_max::]
U_kn[i,:] = data[::args.skip]
Qfile = '%s/fractionnative%s.npy' %(args.direc, suffix)
data = numpy.load(Qfile)[-args.N_max::]
Q_kn[i,:] = data[::args.skip]
# if args.surf:
# sfile = '%s/surfenergy%i.npy' %(args.direc, t)
# data = numpy.load(sfile)[-args.N_max::]
# if numpy.shape(data) == (N_max,2):
# if data[:,0]==data[:,1]:
# data = data[:,0]
# else:
# data = numpy.sum(data,axis=1)
# U_kn[i,:] -= data[::args.skip]
N_max = args.N_max/args.skip
return U_kn, Q_kn, N_max
def subsample(U_kn,Q_kn,K,N_max):
assume_uncorrelated = False
if assume_uncorrelated:
print 'Assuming data is uncorrelated'
N_k = numpy.zeros(K, numpy.int32)
N_k[:] = N_max
else:
print 'Subsampling the data...'
N_k = numpy.zeros(K,numpy.int32)
g = numpy.zeros(K,numpy.float64)
for k in range(K): # subsample the energies
g[k] = timeseries.statisticalInefficiency(Q_kn[k])#,suppress_warning=True)
indices = numpy.array(timeseries.subsampleCorrelatedData(Q_kn[k],g=g[k])) # indices of uncorrelated samples
            N_k[k] = len(indices) # number of uncorrelated samples
U_kn[k,0:N_k[k]] = U_kn[k,indices]
Q_kn[k,0:N_k[k]] = Q_kn[k,indices]
return U_kn, Q_kn, N_k
def get_ukln(args, N_max, K, Qpin, beta_k, k_Qpin, U_kn, Q_kn, N_k):
print 'Computing reduced potential energies...'
u_kln = numpy.zeros([K,K,N_max], numpy.float32)
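    # Re-bias each sample drawn from state k so it can be evaluated under state l:
    # subtract state k's harmonic Q-umbrella term, add state l's, and scale by
    # beta_l to obtain the reduced (dimensionless) potential energy used by MBAR.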
for k in range(K):
for l in range(K):
u_kln[k,l,0:N_k[k]] = beta_k[l] * (U_kn[k,0:N_k[k]] - k_Qpin[k]*(Q_kn[k,0:N_k[k]]-Qpin[k])**2 + k_Qpin[l]*(Q_kn[k,0:N_k[k]]-Qpin[l])**2)
return u_kln
def get_mbar(beta_k, U_kn, N_k, u_kln):
print 'Initializing mbar...'
#f_k = wham.histogram_wham(beta_k, U_kn, N_k)
try:
f_k = numpy.loadtxt('f.k.out')
assert(len(f_k)==len(beta_k))
mbar = pymbar.MBAR(u_kln, N_k, initial_f_k = f_k, verbose=True)
except:
mbar = pymbar.MBAR(u_kln, N_k, verbose=True)
#mbar = pymbar.MBAR(u_kln, N_k, initial_f_k = f_k, verbose=True)
return mbar
def main():
args = parse_args()
kB = 0.00831447/4.184 #Boltzmann constant (Gas constant) in kJ/(mol*K)
dT = 2.5 # Temperature increment for calculating Cv(T)
T = numpy.loadtxt(args.tfile)
K = len(T)
Qpin = numpy.loadtxt(args.Qfile)
k_Qpin = args.k_Qpin*numpy.ones(K)
print 'Initial temperature states are', T
U_kn, Q_kn, N_max = read_data(args, T, Qpin, K)
U_kn, Q_kn, N_k = subsample(U_kn, Q_kn, K, N_max)
# Define new states without Q biasing
T_new = numpy.arange(250,325,5)
K_new = len(T_new)
# Update states
T = numpy.concatenate((T, T_new))
Qpin = numpy.concatenate((Qpin, numpy.zeros(K_new)))
k_Qpin = numpy.concatenate((k_Qpin, numpy.zeros(K_new)))
K += K_new
N_k = numpy.concatenate((N_k,numpy.zeros(K_new)))
U_kn = numpy.concatenate((U_kn,numpy.zeros([K_new,N_max])))
Q_kn = numpy.concatenate((Q_kn,numpy.zeros([K_new,N_max])))
beta_k = 1/(kB*T)
u_kln = get_ukln(args, N_max, K, Qpin, beta_k, k_Qpin, U_kn, Q_kn, N_k)
print "Initializing MBAR..."
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
mbar = get_mbar(beta_k, U_kn, N_k, u_kln)
print "Computing Expectations for E..."
(E_expect, dE_expect) = mbar.computeExpectations(u_kln)*(beta_k)**(-1)
print "Computing Expectations for E^2..."
(E2_expect,dE2_expect) = mbar.computeExpectations(u_kln*u_kln)*(beta_k)**(-2)
print "Computing Expectations for Q..."
(Q,dQ) = mbar.computeExpectations(Q_kn)
print "Computing Heat Capacity as ( <E^2> - <E>^2 ) / ( R*T^2 )..."
Cv = numpy.zeros([K], numpy.float64)
dCv = numpy.zeros([K], numpy.float64)
for i in range(K):
Cv[i] = (E2_expect[i] - (E_expect[i]*E_expect[i])) / ( kB * T[i] * T[i])
dCv[i] = 2*dE_expect[i]**2 / (kB *T[i]*T[i]) # from propagation of error
numpy.save(args.direc+'/foldingcurve_umbrella',numpy.array([T, Q, dQ]))
numpy.save(args.direc+'/heatcap_umbrella',numpy.array([T, Cv, dCv]))
import matplotlib.pyplot as plt
#ncavg = numpy.average(Q_fromfile, axis=1)
plt.figure(1)
#plt.plot(T, ncavg, 'ko')
plt.plot(T[-K_new::],Q[-K_new::],'k')
plt.errorbar(T[-K_new::], Q[-K_new::], yerr=dQ[-K_new::])
plt.xlabel('Temperature (K)')
plt.ylabel('Q fraction native contacts')
#plt.title('Heat Capacity from Go like model MC simulation of 1BSQ')
plt.savefig(args.direc+'/foldingcurve.png')
numpy.save(args.direc+'/foldingcurve',numpy.array([T, Q, dQ]))
numpy.save(args.direc+'/heatcap',numpy.array([T, Cv, dCv]))
if args.show:
plt.show()
if __name__ == '__main__':
main()
| gpl-2.0 |
sinhrks/expandas | pandas_ml/skaccessors/test/test_multioutput.py | 2 | 1807 | #!/usr/bin/env python
try:
import sklearn.multioutput as multioutput
except ImportError:
pass
import numpy as np
import pandas as pd
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestMultiOutput(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.multioutput.MultiOutputRegressor,
multioutput.MultiOutputRegressor)
self.assertIs(df.multioutput.MultiOutputClassifier,
multioutput.MultiOutputClassifier)
def test_multioutput(self):
# http://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py
from sklearn.multioutput import MultiOutputRegressor
from sklearn.ensemble import RandomForestRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
df = pdml.ModelFrame(X, target=y)
max_depth = 30
rf1 = df.ensemble.RandomForestRegressor(max_depth=max_depth,
random_state=self.random_state)
reg1 = df.multioutput.MultiOutputRegressor(rf1)
rf2 = RandomForestRegressor(max_depth=max_depth,
random_state=self.random_state)
reg2 = MultiOutputRegressor(rf2)
df.fit(reg1)
reg2.fit(X, y)
result = df.predict(reg2)
expected = pd.DataFrame(reg2.predict(X))
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_5/Rest.py | 33 | 7215 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [3,4,15,22,37,53,54,55,57,62,77,88,89,90,92,93]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Rest of the Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Rest.pdf')
plt.clf()
| gpl-2.0 |
hainm/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
msultan/msmbuilder | msmbuilder/cluster/agglomerative.py | 6 | 11834 | # Author: Robert McGibbon <[email protected]>
# Contributors: Brooke Husic <[email protected]>
# Copyright (c) 2017, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
import numpy as np
import six
import scipy.spatial.distance
import warnings
from msmbuilder import libdistance
from scipy.cluster.hierarchy import fcluster
from sklearn.utils import check_random_state
from sklearn.base import ClusterMixin, TransformerMixin
from . import MultiSequenceClusterMixin
from ..base import BaseEstimator
from fastcluster import linkage
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
__all__ = ['_LandmarkAgglomerative']
def ward_pooling_function(x, cluster_cardinality, intra_cluster_sum):
normalization_factor = cluster_cardinality*(cluster_cardinality+1)/2
squared_sums = (x**2).sum(axis=1)
result_vector = ((cluster_cardinality * squared_sums -
intra_cluster_sum) / normalization_factor)
return result_vector
POOLING_FUNCTIONS = {
'average': lambda x, ignore1, ignore2: np.mean(x, axis=1),
'complete': lambda x, ignore1, ignore2: np.max(x, axis=1),
'single': lambda x, ignore1, ignore2: np.min(x, axis=1),
'ward': ward_pooling_function,
}
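# Note: these pooling functions are used by _LandmarkAgglomerative.predict().
# Given the matrix of distances from each query point to the landmarks of one
# cluster, they collapse it to a single score per query point (mean, max or min
# distance, or a Ward-style criterion that also uses the cluster cardinality
# and its precomputed intra-cluster squared-distance sum); each query point is
# then assigned to the cluster with the smallest pooled score.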
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def pdist(X, metric='euclidean'):
if isinstance(metric, six.string_types):
return libdistance.pdist(X, metric)
n = len(X)
d = np.empty((n, n))
for i in range(n):
d[i, :] = metric(X, X, i)
return scipy.spatial.distance.squareform(d, checks=False)
def cdist(XA, XB, metric='euclidean'):
if isinstance(metric, six.string_types):
return libdistance.cdist(XA, XB, metric)
nA, nB = len(XA), len(XB)
d = np.empty((nA, nB))
for i in range(nA):
d[i, :] = metric(XB, XA, i)
return d
#-----------------------------------------------------------------------------
# Main Code
#-----------------------------------------------------------------------------
class _LandmarkAgglomerative(ClusterMixin, TransformerMixin):
"""Landmark-based agglomerative hierarchical clustering
Landmark-based agglomerative clustering is a simple scalable version of
"standard" hierarchical clustering which doesn't require computing the full
matrix of pairwise distances between all data points. The idea is
basically to subsample only ``n_landmarks`` "landmark"
data points, cluster them, and then assign labels to the remaining data
points based on their distances to (and the labels of) the landmarks.
Parameters
----------
n_clusters : int
The number of clusters to find.
n_landmarks : int, optional
Memory-saving approximation. Instead of actually clustering every
point, we instead select n_landmark points either randomly or by
striding the data matrix (see ``landmark_strategy``). Then we cluster
the only the landmarks, and then assign the remaining dataset based
on distances to the landmarks. Note that n_landmarks=None is equivalent
to using every point in the dataset as a landmark.
linkage : {'single', 'complete', 'average', 'ward'}, default='average'
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observation. The algorithm will merge
the pairs of cluster that minimize this criterion.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
- single uses the minimum distance between all observations of the
two sets.
- ward linkage minimizes the within-cluster variance
        The linkage also affects the predict() method and the use of landmarks.
After computing the distance from each new data point to the landmarks,
the new data point will be assigned to the cluster that minimizes the
linkage function between the new data point and each of the landmarks.
        (i.e. with ``single``, new data points will be assigned the label of
the closest landmark, with ``average``, it will be assigned the label
of the landmark s.t. the mean distance from the test point to all the
landmarks with that label is minimized, etc.)
metric : string or callable, default= "euclidean"
Metric used to compute the distance between samples.
landmark_strategy : {'stride', 'random'}, default='stride'
Method for determining landmark points. Only matters when n_landmarks
is not None. "stride" takes landmarks every n-th data point in X, and
        "random" selects them uniformly at random.
random_state : integer or numpy.RandomState, optional
The generator used to select random landmarks. Only used if
landmark_strategy=='random'. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
max_landmarks : int, optional, default=None
Useful for hyperparameter searching. If n_clusters exceeds n_landmarks,
max_landmarks will be used. Otherwise, n_landmarks will be used. If
None, no cutoff is enforced on n_landmarks, which may result in memory
issues.
ward_predictor : {'single', 'complete', 'average', 'ward'}, default='ward'
Which criterion to use when predicting cluster assignments after
fitting with ward linkage.
References
----------
.. [1] Mullner, D. "Modern hierarchical, agglomerative clustering
algorithms." arXiv:1109.2378 (2011).
Attributes
----------
landmark_labels_ : np.array, [n_landmarks]
landmarks_ : np.array, [n_landmarks, X.shape]
cluster_centers_ : np.array, [n_clusters, X.shape]
Coordinates of cluster centers (unless RMSD is the metric)
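
    Examples
    --------
    A minimal sketch with hypothetical random data; note that the public
    ``LandmarkAgglomerative`` wrapper expects a list of sequences rather
    than a single array.

    >>> import numpy as np
    >>> X = np.random.RandomState(0).randn(1000, 2)
    >>> model = _LandmarkAgglomerative(n_clusters=4, n_landmarks=100)
    >>> labels = model.fit_predict(X)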
"""
def __init__(self, n_clusters, n_landmarks=None, linkage='average',
metric='euclidean', landmark_strategy='stride',
random_state=None, max_landmarks=None, ward_predictor='ward'):
self.n_clusters = n_clusters
self.n_landmarks = n_landmarks
self.metric = metric
self.landmark_strategy = landmark_strategy
self.random_state = random_state
self.linkage = linkage
self.max_landmarks = max_landmarks
self.ward_predictor = ward_predictor
self.landmark_labels_ = None
self.landmarks_ = None
self.cluster_centers_ = None
def fit(self, X, y=None):
"""
Compute agglomerative clustering.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Returns
-------
self
"""
if self.max_landmarks is not None:
if self.n_clusters > self.n_landmarks:
self.n_landmarks = self.max_landmarks
if self.n_landmarks is None:
distances = pdist(X, self.metric)
tree = linkage(distances, method=self.linkage)
self.landmark_labels_ = fcluster(tree, criterion='maxclust',
t=self.n_clusters) - 1
self.cardinality_ = np.bincount(self.landmark_labels_)
self.squared_distances_within_cluster_ = np.zeros(self.n_clusters)
n = len(X)
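            # 'distances' is in condensed (pdist) form; the index arithmetic
            # below recovers the pair (i, j) behind each condensed index k so
            # that squared within-cluster distances can be accumulated for the
            # Ward pooling function used at predict time.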
for k in range(len(distances)):
i = int(n - 2 - np.floor(np.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5))
j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2)
if self.landmark_labels_[i] == self.landmark_labels_[j]:
self.squared_distances_within_cluster_[
self.landmark_labels_[i]] += distances[k] ** 2
self.landmarks_ = X
else:
if self.landmark_strategy == 'random':
land_indices = check_random_state(self.random_state).randint(
len(X), size=self.n_landmarks)
else:
land_indices = np.arange(len(X))[::(len(X) //
self.n_landmarks)][:self.n_landmarks]
distances = pdist(X[land_indices], self.metric)
tree = linkage(distances, method=self.linkage)
self.landmark_labels_ = fcluster(tree, criterion='maxclust',
t=self.n_clusters) - 1
self.cardinality_ = np.bincount(self.landmark_labels_)
self.squared_distances_within_cluster_ = np.zeros(self.n_clusters)
n = len(X[land_indices])
for k in range(len(distances)):
i = int(n - 2 - np.floor(np.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5))
j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2)
if self.landmark_labels_[i] == self.landmark_labels_[j]:
self.squared_distances_within_cluster_[
self.landmark_labels_[i]] += distances[k] ** 2
self.landmarks_ = X[land_indices]
if self.metric != 'rmsd':
cluster_centers_ = []
for i in range(self.n_clusters):
temp = list(np.mean(self.landmarks_[self.landmark_labels_==i], axis=0))
cluster_centers_.append(temp)
self.cluster_centers_ = np.array(cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
dists = cdist(X, self.landmarks_, self.metric)
pfunc_name = self.ward_predictor if self.linkage == 'ward' else self.linkage
try:
pooling_func = POOLING_FUNCTIONS[pfunc_name]
except KeyError:
raise ValueError("linkage {} is not supported".format(pfunc_name))
pooled_distances = np.empty(len(X))
pooled_distances.fill(np.infty)
labels = np.zeros(len(X), dtype=int)
for i in range(self.n_clusters):
if np.any(self.landmark_labels_ == i):
d = pooling_func(dists[:, self.landmark_labels_ == i],
self.cardinality_[i],
self.squared_distances_within_cluster_[i])
if np.any(d < 0):
warnings.warn("Distance shouldn't be negative.")
mask = (d < pooled_distances)
pooled_distances[mask] = d[mask]
labels[mask] = i
else:
print("No data points were assigned to cluster {}".format(i))
return labels
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
self.fit(X)
return self.predict(X)
class LandmarkAgglomerative(MultiSequenceClusterMixin, _LandmarkAgglomerative,
BaseEstimator):
__doc__ = _LandmarkAgglomerative.__doc__
_allow_trajectory = True
| lgpl-2.1 |
mattilyra/scikit-learn | examples/datasets/plot_iris_dataset.py | 35 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/core/config_init.py | 8 | 17165 | """
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. If register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.
"""
import pandas.core.config as cf
from pandas.core.config import (is_int, is_bool, is_text, is_instance_factory,
is_one_of_factory, is_callable)
from pandas.io.formats.console import detect_console_encoding
from pandas.io.formats.terminal import is_terminal
# compute
use_bottleneck_doc = """
: bool
Use the bottleneck library to accelerate if it is installed,
the default is True
Valid values: False,True
"""
def use_bottleneck_cb(key):
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
use_numexpr_doc = """
: bool
Use the numexpr library to accelerate computation if it is installed,
the default is True
Valid values: False,True
"""
def use_numexpr_cb(key):
from pandas.core.computation import expressions
expressions.set_use_numexpr(cf.get_option(key))
with cf.config_prefix('compute'):
cf.register_option('use_bottleneck', True, use_bottleneck_doc,
validator=is_bool, cb=use_bottleneck_cb)
cf.register_option('use_numexpr', True, use_numexpr_doc,
validator=is_bool, cb=use_numexpr_cb)
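# Usage sketch (not executed here): these options are read and toggled at
# runtime through the standard pandas option API, e.g.
#   pd.get_option('compute.use_bottleneck')
#   pd.set_option('compute.use_numexpr', False)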
#
# options from the "display" namespace
pc_precision_doc = """
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc = """
: int
Default space for DataFrame columns.
"""
pc_max_rows_doc = """
: int
If max_rows is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the height of the terminal and print a truncated object which fits
the screen height. The IPython notebook, IPython qtconsole, or
IDLE do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_cols_doc = """
: int
If max_cols is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the width of the terminal and print a truncated object which fits
the screen width. The IPython notebook, IPython qtconsole, or IDLE
do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_categories_doc = """
: int
This sets the maximum number of categories pandas should output when
printing out a `Categorical` or a Series of dtype "category".
"""
pc_max_info_cols_doc = """
: int
max_info_columns is used in DataFrame.info method to decide if
per column information will be printed.
"""
pc_nb_repr_h_doc = """
: boolean
When True, IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_date_dayfirst_doc = """
: boolean
When True, prints and parses dates with the day first, eg 20/01/2005
"""
pc_date_yearfirst_doc = """
: boolean
When True, prints and parses dates with the year first, eg 2005/01/20
"""
pc_pprint_nest_depth = """
: int
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc = """
: boolean
"sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
pc_encoding_doc = """
: str/unicode
Defaults to the detected encoding of the console.
Specifies the encoding to be used for strings returned by to_string,
these are generally strings meant to be displayed on the console.
"""
float_format_doc = """
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See formats.format.EngFormatter for an example.
"""
max_colwidth_doc = """
: int
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output.
"""
colheader_justify_doc = """
: 'left'/'right'
Controls the justification of column headers. used by DataFrameFormatter.
"""
pc_expand_repr_doc = """
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
: boolean or 'truncate'
Whether to print out dimensions at the end of DataFrame repr.
If 'truncate' is specified, only print out the dimensions if the
    frame is truncated (e.g. not displaying all rows and/or columns)
"""
pc_east_asian_width_doc = """
: boolean
Whether to use the Unicode East Asian Width to calculate the display text
width.
Enabling this may affect to the performance (default: False)
"""
pc_ambiguous_as_wide_doc = """
: boolean
Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)
(default: False)
"""
pc_latex_repr_doc = """
: boolean
Whether to produce a latex DataFrame representation for jupyter
environments that support it.
(default: False)
"""
pc_table_schema_doc = """
: boolean
Whether to publish a Table Schema representation for frontends
that support it.
(default: False)
"""
pc_html_border_doc = """
: int
A ``border=value`` attribute is inserted in the ``<table>`` tag
for the DataFrame HTML repr.
"""
pc_html_border_deprecation_warning = """\
html.border has been deprecated, use display.html.border instead
(currently both are identical)
"""
pc_html_use_mathjax_doc = """\
: boolean
When True, Jupyter notebook will process table contents using MathJax,
rendering mathematical expressions enclosed by the dollar symbol.
(default: True)
"""
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
a terminal this can be set to None and pandas will correctly auto-detect
the width.
Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
terminal and hence it is not possible to correctly detect the width.
"""
pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
will be displayed as exactly 0 by repr and friends.
"""
pc_max_seq_items = """
: int or None
    when pretty-printing a long sequence, no more than `max_seq_items`
will be printed. If items are omitted, they will be denoted by the
addition of "..." to the resulting string.
If set to None, the number of items to be printed is unlimited.
"""
pc_max_info_rows_doc = """
: int or None
df.info() will usually show null-counts for each column.
For large frames this can be quite slow. max_info_rows and max_info_cols
limit this null check only to frames with smaller dimensions than
specified.
"""
pc_large_repr_doc = """
: 'truncate'/'info'
For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
show a truncated table (the default from 0.13), or switch to the view from
df.info() (the behaviour in earlier versions of pandas).
"""
pc_memory_usage_doc = """
: bool, string or None
This specifies if the memory usage of a DataFrame should be displayed when
df.info() is called. Valid values True,False,'deep'
"""
pc_latex_escape = """
: bool
    This specifies if the to_latex method of a DataFrame escapes special
characters.
Valid values: False,True
"""
pc_latex_longtable = """
: bool
    This specifies if the to_latex method of a DataFrame uses the longtable
    format.
Valid values: False,True
"""
pc_latex_multicolumn = """
: bool
    This specifies if the to_latex method of a DataFrame uses multicolumns
to pretty-print MultiIndex columns.
Valid values: False,True
"""
pc_latex_multicolumn_format = """
: string
This specifies the format for multicolumn headers.
Can be surrounded with '|'.
Valid values: 'l', 'c', 'r', 'p{<width>}'
"""
pc_latex_multirow = """
: bool
    This specifies if the to_latex method of a DataFrame uses multirows
to pretty-print MultiIndex rows.
Valid values: False,True
"""
style_backup = dict()
def table_schema_cb(key):
from pandas.io.formats.printing import _enable_data_resource_formatter
_enable_data_resource_formatter(cf.get_option(key))
with cf.config_prefix('display'):
cf.register_option('precision', 6, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc,
validator=is_one_of_factory([None, is_callable]))
cf.register_option('column_space', 12, validator=is_int)
cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,
validator=is_instance_factory((int, type(None))))
cf.register_option('max_rows', 60, pc_max_rows_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('max_categories', 8, pc_max_categories_doc,
validator=is_int)
cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
if is_terminal():
max_cols = 0 # automatically determine optimal number of columns
else:
max_cols = 20 # cannot determine optimal number of columns
cf.register_option('max_columns', max_cols, pc_max_cols_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('large_repr', 'truncate', pc_large_repr_doc,
validator=is_one_of_factory(['truncate', 'info']))
cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,
validator=is_int)
cf.register_option('colheader_justify', 'right', colheader_justify_doc,
validator=is_text)
cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
validator=is_bool)
cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
validator=is_bool)
cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
validator=is_bool)
cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
validator=is_int)
cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
validator=is_bool)
cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc,
validator=is_one_of_factory([True, False, 'truncate']))
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('width', 80, pc_width_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('memory_usage', True, pc_memory_usage_doc,
validator=is_one_of_factory([None, True,
False, 'deep']))
cf.register_option('unicode.east_asian_width', False,
pc_east_asian_width_doc, validator=is_bool)
cf.register_option('unicode.ambiguous_as_wide', False,
                       pc_ambiguous_as_wide_doc, validator=is_bool)
cf.register_option('latex.repr', False,
pc_latex_repr_doc, validator=is_bool)
cf.register_option('latex.escape', True, pc_latex_escape,
validator=is_bool)
cf.register_option('latex.longtable', False, pc_latex_longtable,
validator=is_bool)
cf.register_option('latex.multicolumn', True, pc_latex_multicolumn,
validator=is_bool)
    cf.register_option('latex.multicolumn_format', 'l', pc_latex_multicolumn_format,
validator=is_text)
cf.register_option('latex.multirow', False, pc_latex_multirow,
validator=is_bool)
cf.register_option('html.table_schema', False, pc_table_schema_doc,
validator=is_bool, cb=table_schema_cb)
cf.register_option('html.border', 1, pc_html_border_doc,
validator=is_int)
cf.register_option('html.use_mathjax', True, pc_html_use_mathjax_doc,
validator=is_bool)
with cf.config_prefix('html'):
cf.register_option('border', 1, pc_html_border_doc,
validator=is_int)
cf.deprecate_option('html.border', msg=pc_html_border_deprecation_warning,
rkey='display.html.border')
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
"""
with cf.config_prefix('mode'):
cf.register_option('sim_interactive', False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
: boolean
use_inf_as_null had been deprecated and will be removed in a future
version. Use `use_inf_as_na` instead.
"""
use_inf_as_na_doc = """
: boolean
True means treat None, NaN, INF, -INF as NA (old way),
False means None and NaN are null, but INF, -INF are not NA
(new way).
"""
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
def use_inf_as_na_cb(key):
from pandas.core.dtypes.missing import _use_inf_as_na
_use_inf_as_na(key)
with cf.config_prefix('mode'):
cf.register_option('use_inf_as_na', False, use_inf_as_na_doc,
cb=use_inf_as_na_cb)
cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,
cb=use_inf_as_na_cb)
cf.deprecate_option('mode.use_inf_as_null', msg=use_inf_as_null_doc,
rkey='mode.use_inf_as_na')
# user warnings
chained_assignment = """
: string
Raise an exception, warn, or no action if trying to use chained assignment,
The default is warn
"""
with cf.config_prefix('mode'):
cf.register_option('chained_assignment', 'warn', chained_assignment,
validator=is_one_of_factory([None, 'warn', 'raise']))
# Set up the io.excel specific configuration.
writer_engine_doc = """
: string
The default Excel writer engine for '{ext}' files. Available options:
auto, {others}.
"""
_xls_options = ['xlwt']
_xlsm_options = ['openpyxl']
_xlsx_options = ['openpyxl', 'xlsxwriter']
with cf.config_prefix("io.excel.xls"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xls',
others=', '.join(_xls_options)),
validator=str)
with cf.config_prefix("io.excel.xlsm"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsm',
others=', '.join(_xlsm_options)),
validator=str)
with cf.config_prefix("io.excel.xlsx"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsx',
others=', '.join(_xlsx_options)),
validator=str)
# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
The default parquet reader/writer engine. Available options:
'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""
with cf.config_prefix('io.parquet'):
cf.register_option(
'engine', 'auto', parquet_engine_doc,
validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet']))
# --------
# Plotting
# ---------
register_converter_doc = """
: bool
Whether to register converters with matplotlib's units registry for
dates, times, datetimes, and Periods. Toggling to False will remove
the converters, restoring any converters that pandas overwrote.
"""
def register_converter_cb(key):
from pandas.plotting import register_matplotlib_converters
from pandas.plotting import deregister_matplotlib_converters
if cf.get_option(key):
register_matplotlib_converters()
else:
deregister_matplotlib_converters()
with cf.config_prefix("plotting.matplotlib"):
cf.register_option("register_converters", True, register_converter_doc,
validator=bool, cb=register_converter_cb)
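# Usage sketch (not executed here): the converter registration can be toggled
# at runtime with
#   pd.set_option('plotting.matplotlib.register_converters', False)
# which fires register_converter_cb and deregisters the matplotlib unit
# converters that pandas installed.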
| bsd-3-clause |
rl-institut/reegis-hp | reegis_hp/de21/test.py | 3 | 1906 | import pandas as pd
from matplotlib import pyplot as plt
import logging
from oemof.tools import logger
logger.define_logging()
exit(0)
df = pd.read_csv('/home/uwe/geo.csv', index_col='zip_code')
del df['Unnamed: 0']
del df['gid']
df.to_csv('/home/uwe/git_local/reegis-hp/reegis_hp/de21/geometries/postcode.csv')
exit(0)
df = pd.read_csv('solar_cap.csv', index_col=[0, 1, 2, 3])
# df = df.sortlevel()
# df = df.reindex_axis(sorted(df.columns), axis=1)
df.index = df.index.droplevel(0)
my = df.groupby(level=[0]).sum()
# df_all = pd.Series(df.sum(axis=1), index=df.index)
# my = df_all.unstack(level=0)
# my = my.sortlevel()
my.plot(stacked=True, kind='area')
# plt.show()
# df.loc['Solar'].plot(stacked=True, kind='area')
# df.loc['Solar'].plot()
# plt.show()
df = pd.read_csv('test_cap.csv', index_col=[0, 1]).fillna(0)
df = df.sortlevel()
df = df.reindex_axis(sorted(df.columns), axis=1)
print(df)
df_all = pd.Series(df.sum(axis=1), index=df.index)
my = df_all.unstack(level=0)
my = my.sortlevel()
my.plot(stacked=True, kind='area')
# plt.show()
df.loc['Solar'].plot(stacked=True, kind='area')
df.loc['Solar'].plot()
plt.show()
exit(0)
seq_file = 'my_scenarios/reegis_de_21_test_neu_seq.csv'
# seq_neu = 'scenarios/reegis_de_21_test_neu_neu_seq.csv'
# para_file = 'scenarios/reegis_de_3_test.csv'
para_file = 'my_scenarios/EK_test3_neu2.csv'
seq_neu = 'my_scenarios/EK_test3_neu2_seq.csv'
df_seq = pd.read_csv(seq_neu, header=[0, 1, 2, 3, 4],
parse_dates=True, index_col=0)
# tmp_csv.to_csv(seq_neu)
# print(tmp_csv.index)
df = pd.read_csv(para_file, index_col=[0, 1, 2])
mask = df['actual_value'].str.contains('seq').fillna(False)
a = df[mask].index.tolist()
print(a[0])
# print(pd.Series([1, 2, 4]))
# # df.loc[[a[0]], 'actual_value'] = pd.Series([1, 2, 4])
# print(df['actual_value'].loc[[a[0]]])
# s = df.to_dict()
# print(s['actual_value'][a[0]])
print(df_seq[a[0]])
| gpl-3.0 |
yvesalexandre/privacy-tools | within_voronoi_translation/within_voronoi_translation.py | 1 | 7700 | #!/usr/bin/env python
"""
within_voronoi_translation.py: Move antennas uniformly within their voronoi cell.
Noise is often added to the GPS coordinates of antennas to hinter's an attacker
ability to link outside information to the released database. This code takes as
input a list of antennas location and moves them uniformly within their voronoi
cell and either the convex hull formed by the antennas or the polygon. The noise
added is proportional to the density of antennas in the region while preserving
the overall structure of the mesh.
Use:
> import within_voronoi_translation as wvt
> wvt.generate_new_positions([(0.367, 0.491), (0.415, 0.289), (0.495, 0.851),...])
Test:
$ python within_voronoi_translation.py
or
$ python within_voronoi_translation.py senegal
Algorithm:
Points are then drawn at random in the square bounding the circle whose diameter
is equal to the maximum distance between the centroid and its voronoi vertices,
or to the half-min distance to its neighbors for border points.
Points are rejected until they fall in the voronoi cell and either inside the
convex hull or the polygon.
Author: Yves-Alexandre de Montjoye
https://github.com/yvesalexandre/privacy-tools
"""
import scipy.spatial
import random
import numpy as np
def __compute_distance(a, pos, positions):
"""
Return the distance between an antenna a and a point pos (tuple)
"""
x1 = positions[a][0]
y1 = positions[a][1]
x2, y2 = pos[0], pos[1]
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def __compute_distance_centroids(a, b, positions):
"""
Return the distance between two antennas a and b
"""
return __compute_distance(a, positions[b], positions)
def __compute_border_points(positions):
"""
    Return the set of points for which at least one of their voronoi vertices falls
outside of the convex hull.
"""
voronoi = scipy.spatial.Voronoi(positions)
vertices_outside = set([-1])
for i, vertice in enumerate(voronoi.vertices):
        if not __in_convexhull(vertice, positions):
vertices_outside.add(i)
points_outside = set()
for region_id, region in enumerate(voronoi.regions):
if any(point in vertices_outside for point in region):
points_outside.add(list(voronoi.point_region).index(region_id))
return points_outside
def __compute_max_radius(positions, neighbors):
"""
Return a list of the maximum distances between an antenna and its voronoi
vertices.
Note: the half-min distance to its neighbors for border points
"""
voronoi = scipy.spatial.Voronoi(positions)
border_points = __compute_border_points(positions)
radiuses = []
for point, region in enumerate(voronoi.point_region):
if point not in border_points:
radiuses.append(max([__compute_distance(point, voronoi.vertices[pos], positions) for pos in voronoi.regions[region]]))
else:
radiuses.append(min([__compute_distance_centroids(point,i,positions) for i in neighbors[point]]) / 2)
return radiuses
def __compute_neighbors(positions):
"""
Return a list of the neighbors of every antenna.
"""
delaunay = scipy.spatial.Delaunay(positions)
slices, delaunay_neighbors = delaunay.vertex_neighbor_vertices
neighbors = []
for node, pos in enumerate(positions):
neighbors.append(list(delaunay_neighbors[slices[node]:slices[node + 1]]))
return neighbors
def __in_convexhull(point, initial_positions):
"""
Return True if the point is inside the convex hull.
"""
if set(scipy.spatial.ConvexHull(initial_positions + [point]).vertices) - set(scipy.spatial.ConvexHull(initial_positions).vertices):
return False
else:
return True
def __draw_point(node, positions, neighbors, radiuses, polygon):
"""
Return the new position of the antenna.
"""
condition = True
while condition:
trans_x, trans_y = [(random.random() - .5) * radiuses[node] for i in range(2)]
proposed_point = (positions[node][0] - trans_x, positions[node][1] - trans_y)
in_voronoi = __compute_distance(node, proposed_point, positions) < min([__compute_distance(i, proposed_point, positions) for i in neighbors[node]])
if in_voronoi:
if polygon:
if __in_polygon(proposed_point, polygon):
return proposed_point
else:
if __in_convexhull(proposed_point, positions):
return proposed_point
def generate_new_positions(positions, polygon=None):
"""
Return the new position for all the antennas.
Shapefile:
polygon expects a lonlat polygon. Shapefiles can loaded in python using
shapefile and can be converted to lonlat format using pyproj.transform and
the appropriate projection (http://www.prj2epsg.org/search).
"""
neighbors = __compute_neighbors(positions)
radiuses = __compute_max_radius(positions, neighbors)
output = []
for point_id in range(len(positions)):
output.append(__draw_point(point_id, positions, neighbors, radiuses, polygon))
return output
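# Illustrative sketch added for clarity (not part of the original module):
# a minimal call pattern for generate_new_positions on a random mesh.
# The antenna count and the seed are arbitrary choices.
def _generate_new_positions_demo():
    random.seed(0)
    antennas = [(random.random(), random.random()) for _ in range(30)]
    moved = generate_new_positions(antennas)
    # One new position per antenna, in the same order as the input.
    assert len(moved) == len(antennas)
    return moved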
def __in_polygon(point,poly):
"""
Return whether a point is in a polygon.
Ray-casting Algorithm
Adapted from http://geospatialpython.com/2011/08/point-in-polygon-2-on-line.html
"""
x, y = point
# check if point is a vertex
if (x,y) in poly:
return True
# check if point is on a boundary
for i in range(len(poly)):
p1 = None
p2 = None
if i == 0:
p1 = poly[0]
p2 = poly[1]
else:
p1 = poly[i - 1]
p2 = poly[i]
if p1[1] == p2[1] and p1[1] == y and x > min(p1[0], p2[0]) and x < max(p1[0], p2[0]):
return True
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n + 1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
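# Illustrative sketch added for clarity (not part of the original module):
# a quick sanity check of the ray-casting test on a unit square; the square
# and the probe points are arbitrary.
def _in_polygon_demo():
    square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    assert __in_polygon((0.5, 0.5), square)      # interior point
    assert not __in_polygon((1.5, 0.5), square)  # exterior point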
if __name__ == '__main__':
import sys
import matplotlib.pyplot as plt
if (len(sys.argv) > 1) and (sys.argv[1] == 'senegal'):
main_type = False
else:
main_type = True
if main_type:
initial_positions = [(random.random(), random.random()) for i in range(350)]
new_positions = generate_new_positions(initial_positions)
else:
import pyproj
import shapefile
sf = shapefile.Reader('senegal_shapefile/senegal.shp')
region = sf.shapes()[0]
polygon = [pyproj.transform(pyproj.Proj(init='epsg:32628'),pyproj.Proj(proj='latlong'), pts[0], pts[1]) for pts in region.points]
initial_positions = []
while len(initial_positions) < 350:
point = [random.uniform(-18, -11), random.uniform(12,17)]
if __in_polygon(point,polygon):
initial_positions.append(point)
new_positions = generate_new_positions(initial_positions, polygon)
fig = plt.figure(figsize=(10,9))
scipy.spatial.voronoi_plot_2d(scipy.spatial.Voronoi(initial_positions), plt.gca())
for i, pos in enumerate(initial_positions):
plt.text(pos[0], pos[1], str(i))
for point in __compute_border_points(initial_positions):
initial_pos = initial_positions[point]
plt.plot(initial_pos[0], initial_pos[1], marker='o', color='g', ls='')
if main_type:
hull = scipy.spatial.ConvexHull(initial_positions)
for simplex in hull.simplices:
x, y = zip(*[initial_positions[simplex[0]], initial_positions[simplex[1]]])
plt.plot(x, y, 'b-')
else:
list_x, list_y = zip(*polygon)
plt.plot(list_x, list_y, 'b-')
for i, pos in enumerate(new_positions):
initial_pos = initial_positions[i]
plt.plot([initial_pos[0], pos[0]], [initial_pos[1], pos[1]], 'k-')
plt.plot(pos[0], pos[1], marker='o', color='r', ls='')
plt.show()
| mit |
toobaz/pandas | pandas/core/tools/timedeltas.py | 2 | 6506 | """
timedelta support tools
"""
import warnings
import numpy as np
from pandas._libs.tslibs import NaT
from pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.arrays.timedeltas import sequence_to_td64ns
@deprecate_kwarg(old_arg_name="box", new_arg_name=None)
def to_timedelta(arg, unit="ns", box=True, errors="raise"):
"""
Convert argument to timedelta.
Timedeltas are absolute differences in times, expressed in difference
units (e.g. days, hours, minutes, seconds). This method converts
an argument from a recognized timedelta format / value into
a Timedelta type.
Parameters
----------
arg : str, timedelta, list-like or Series
The data to be converted to timedelta.
unit : str, default 'ns'
Denotes the unit of the arg. Possible values:
('Y', 'M', 'W', 'D', 'days', 'day', 'hours', hour', 'hr',
'h', 'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds',
'sec', 'second', 'ms', 'milliseconds', 'millisecond',
'milli', 'millis', 'L', 'us', 'microseconds', 'microsecond',
'micro', 'micros', 'U', 'ns', 'nanoseconds', 'nano', 'nanos',
'nanosecond', 'N').
box : bool, default True
- If True returns a Timedelta/TimedeltaIndex of the results.
- If False returns a numpy.timedelta64 or numpy.darray of
values of dtype timedelta64[ns].
.. deprecated:: 0.25.0
Use :meth:`Series.to_numpy` or :meth:`Timedelta.to_timedelta64`
instead to get an ndarray of values or numpy.timedelta64,
respectively.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'ignore', then invalid parsing will return the input.
Returns
-------
timedelta64 or numpy.array of timedelta64
Output type returned if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
Examples
--------
Parsing a single string to a Timedelta:
>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015')
Parsing a list or array of strings:
>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02',
'00:00:03', '00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
Returning an ndarray by using the 'box' keyword argument:
>>> pd.to_timedelta(np.arange(5), box=False)
array([0, 1, 2, 3, 4], dtype='timedelta64[ns]')
"""
unit = parse_timedelta_unit(unit)
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}")
if unit in {"Y", "y", "M"}:
warnings.warn(
"M and Y units are deprecated and " "will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
if arg is None:
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, unit=unit, box=False, errors=errors)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name)
elif isinstance(arg, np.ndarray) and arg.ndim == 0:
# extract array scalar and process below
arg = arg.item()
elif is_list_like(arg) and getattr(arg, "ndim", 1) == 1:
return _convert_listlike(arg, unit=unit, box=box, errors=errors)
elif getattr(arg, "ndim", 1) > 1:
raise TypeError(
"arg must be a string, timedelta, list, tuple, " "1-d array, or Series"
)
# ...so it must be a scalar value. Return scalar.
return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, errors=errors)
def _coerce_scalar_to_timedelta_type(r, unit="ns", box=True, errors="raise"):
"""Convert string 'r' to a timedelta object."""
try:
result = Timedelta(r, unit)
if not box:
# explicitly view as timedelta64 for case when result is pd.NaT
result = result.asm8.view("timedelta64[ns]")
except ValueError:
if errors == "raise":
raise
elif errors == "ignore":
return r
# coerce
result = NaT
return result
def _convert_listlike(arg, unit="ns", box=True, errors="raise", name=None):
"""Convert a list of objects to a timedelta index object."""
if isinstance(arg, (list, tuple)) or not hasattr(arg, "dtype"):
# This is needed only to ensure that in the case where we end up
# returning arg (errors == "ignore"), and where the input is a
# generator, we return a useful list-like instead of a
# used-up generator
arg = np.array(list(arg), dtype=object)
try:
value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]
except ValueError:
if errors == "ignore":
return arg
else:
# This else-block accounts for the cases when errors='raise'
# and errors='coerce'. If errors == 'raise', these errors
# should be raised. If errors == 'coerce', we shouldn't
# expect any errors to be raised, since all parsing errors
# cause coercion to pd.NaT. However, if an error / bug is
# introduced that causes an Exception to be raised, we would
# like to surface it.
raise
if box:
from pandas import TimedeltaIndex
value = TimedeltaIndex(value, unit="ns", name=name)
return value
| bsd-3-clause |
ManuelMBaumann/opt_tau | num_exper/mekrylov.py | 1 | 10316 | import scipy.sparse as sparse
import matplotlib.pyplot as plt
#import scipy.io as io
import numpy as np
import scipy.sparse.linalg as spla
import pyamg
from math import sqrt, atan, cos, sin, pi, atan2
from numpy.linalg import norm
#from scipy.io import mmwrite
from nutils import *
from numpy.linalg import solve
from scipy.linalg.blas import get_blas_funcs
from plot_misc import *
import time
import cmath
class convergenceHistory:
def __init__(self, plot_resnrm=False):
self.resvec = []
self.plot_resnrm = plot_resnrm
def callback(self, _rnrm_):
self.resvec.append(_rnrm_)
if self.plot_resnrm:
print(str(len(self.resvec))+' - '+str(_rnrm_))
class __NoPrecond__(object):
def solve(self,_X_): return _X_
def megmres(A, B, m=1000, X0=None, tol=1e-8, maxit=None, M1=None, callback=None, plot_ritz=False):
size = B.shape
if maxit is None:
maxit = 2*np.prod(size)
if M1 is None:
# No preconditioner class
class __NoPrecond__(object):
def solve(self,_X_): return _X_
M1 = __NoPrecond__()
if X0 is None:
X0 = np.zeros(size, dtype = complex)
X = np.array(X0)
bnrm = norm(B)
info = 1
# Check for zero rhs:
if bnrm == 0.0:
# Solution is null-vector
info = 0
return np.zeros(size), info
# Compute initial residual:
R = B - A.dot(X)
rnrm = norm(R)
# Relative tolerance
tolb = tol*bnrm
if callback is not None:
callback(rnrm)
if rnrm < tolb:
# Initial guess is a good enough solution
info = 0
return X, info
# Initialization
rotmat = get_blas_funcs('rotg', dtype=np.complex128) # call to ZROTG
V = [np.zeros(size, dtype=complex) for i in range(0, m+1)]
H = np.zeros((m+1, m), dtype=complex)
cs = np.zeros(m+1, dtype=np.float64)
cs_tmp = np.zeros(1, dtype=np.complex128)
sn = np.zeros(m+1, dtype=np.complex128)
e1 = np.zeros(m+1, dtype=complex)
e1[0] = 1.
for _iter in range(0, maxit):
# Begin iteration
V[0] = R/rnrm
s = rnrm*e1
for i in range(0, m):
# Construct orthonormal basis
# using Gram-Schmidt
W = A.dot(M1.solve(V[i]))
for k in range(0, i+1):
H[k, i] = np.vdot(V[k],W)
W = W - H[k, i]*V[k]
H[i+1, i] = norm(W)
V[i+1] = W/H[i+1, i]
for k in range(0, i):
# Apply Givens rotation
temp = cs[k]*H[k, i] + sn[k]*H[k+1, i]
H[k+1, i] = -np.conj(sn[k])*H[k, i] + cs[k]*H[k+1, i]
H[k, i] = temp
cs_tmp, sn[i] = rotmat(H[i, i], H[i+1, i])
cs[i] = cs_tmp.real # BUGFIX: BLAS wrapper out params
temp = cs[i]*s[i]
s[i+1] = -np.conj(sn[i])*s[i]
s[i] = temp
H[i, i] = cs[i]*H[i, i] + sn[i]*H[i+1, i]
H[i+1, i] = 0.0
rnrm = abs(s[i+1])
if callback is not None:
callback(rnrm)
if rnrm < tolb:
                y = solve(H[:i + 1, :i + 1], s[:i + 1])
                Xtmp = np.zeros(size, dtype=complex)
                for k in range(0, i + 1):
                    Xtmp += y[k]*V[k]
X += M1.solve(Xtmp)
info = 0
if plot_ritz:
plot_ritzvals(H[:i,:i])
return X, info
y = solve(H[:m, :m], s[:m])
Xtmp = np.zeros(size, dtype=complex)
        for k in range(0, m):
Xtmp += y[k]*V[k]
X += M1.solve(Xtmp)
R = B - A.dot(X)
rnrm = norm(R)
if callback is not None:
callback(rnrm)
if rnrm < tolb:
info = 0
break
        if plot_ritz and _iter == maxit - 1:
plot_ritzvals(H[:m,:m])
return X, info
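# Illustrative sketch added for clarity (not taken from the original
# experiments): megmres only needs an operator with a ``dot`` method acting
# on an N-by-Nom block of right-hand sides. A diagonal operator is the
# simplest such example; sizes and values below are arbitrary.
def _megmres_demo():
    class DiagOp:
        def __init__(self, d):
            self.d = d
        def dot(self, X):
            # apply a diagonal matrix to every column of X
            return self.d[:, None]*X
    d = np.linspace(1.0, 2.0, 20)
    B = np.ones((20, 3), dtype=complex)
    X, info = megmres(DiagOp(d), B, tol=1e-10)
    assert info == 0 and np.allclose(DiagOp(d).dot(X), B)
    return X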
def vectorize_me(A, om, tau, dd, P=None, imgtype='png'):
eta = om/(om-tau)
# simplified for right operators being diag matrices
Pflag = 0
    if P is None:
P = __NoPrecond__()
Pflag = 1
N = A.K.shape[0]
Nom = A.Om.shape[0]
Eij = np.zeros((N,Nom), dtype=complex)
A_blk = np.zeros((N*Nom,N*Nom), dtype=complex)
for i in range(N):
for j in range(Nom):
Eij[i,j] = 1.0
A_blk[j*N:(j+1)*N,i+j*N] = A.dot(P.solve(Eij))[:,j]
Eij[i,j] = 0.0
with plot.PyPlot( 'blk_eigs', figsize=(10,10), imgtype=imgtype) as plt:
vals = np.linalg.eigvals(A_blk)
plt.plot(vals.real, vals.imag, 'bx', markersize=5)
plt.axhline(linewidth=0.5, color='k')
plt.axvline(linewidth=0.5, color='k')
#plt.axis('equal')
# Plotting
plt.axis('scaled')
plt.xlim([-1.7,1.7])
plt.ylim([-0.7,1.8])
#plt.axis([-1.7, 1.7, -0.7, 1.7])
plt.xlabel('real part', fontsize=16)
plt.ylabel('imag part', fontsize=16)
NOP = 100
th = np.linspace(0.0,2.0*pi,NOP)
C = 0.0 + 1j*( (dd*abs(tau)**2)/(2.0*tau.imag*(tau.imag+dd*tau.real)) )
R = sqrt( abs(tau)**2*(dd**2+1.0)/(4.0*(tau.imag+dd*tau.real)**2) )
X = R*np.cos(th)+C.real
Y = R*np.sin(th)+C.imag
plt.plot(X, Y, color='0.55')
plt.plot(C.real, C.imag, color='0.55', marker='x', markersize=10)
plt.title('Spectrum of '+r'$A \circ P_1^{-1}$', fontsize=20)
if Pflag==0:
for k in range(1,Nom): # do not plot bounding circle for f_1
ck = -np.conj(tau)/(tau-np.conj(tau)) - eta[k]
r = abs(tau/(tau-np.conj(tau)))
x = r*np.cos(th)+ck.real
y = r*np.sin(th)+ck.imag
plt.plot(x, y, color='0.75', linestyle='dashed')
plt.plot(ck.real, ck.imag, color='0.55', marker='x', markersize=10)
plt.title('Spectrum of '+r'$A \circ P_1^{-1} \circ P_2^{-1}$', fontsize=20)
if Pflag==1:
with plot.PyPlot( 'blk_spy', ndigits=0 ) as plt:
plt.spy( A_blk, markersize=0.8, precision=0.05)
def me_driver(K, C, M, b, freq, tau, damping, tol, maxit, plot_resnrm=True, iLU=False, fill_factor=10, rot=False, plot_ritz=False):
class vG_op:
def __init__(self, K, C, M, Om, P):
self.K = K
self.C = C
self.M = M
self.Om = Om
self.P = P
self.type = complex
def dot(self, X):
X = self.P.solve(X)
return self.K.dot(X) + 1j*( self.C.dot( ((self.Om).dot(X.T)).T ) ) - self.M.dot( ((self.Om**2).dot(X.T)).T )
#return self.K.dot(X) - self.M.dot( ((self.Om**2).dot(X.T)).T )
def resub(self, X):
return self.P.solve(X)
class precon:
def __init__(self, K, C, M, tau, eta, timing=False):
P = K+1j*tau*C-tau**2*M
#P = K-tau*M
t0 = time.time()
self.P = spla.splu(P.tocsc())
self.IE = sparse.identity(len(eta)) - sparse.diags(eta,0)
te = time.time()
if timing:
print('LU decomposition:'+str(te-t0))
def solve(self, X):
X = self.P.solve(X)
return (self.IE.dot(X.T)).T
class precon_ilu:
def __init__(self, K, C, M, tau, eta, fill_factor=10.0, timing=False):
P = K+1j*tau*C-tau**2*M
#P = K-tau*M
t0 = time.time()
self.P = spla.spilu( P.tocsc(), fill_factor=fill_factor)
self.IE = sparse.identity(len(eta)) - sparse.diags(eta,0)
te = time.time()
if timing:
print('iLU({}) decomposition:'.format(fill_factor)+str(te-t0))
def solve(self, X):
X = self.P.solve(X)
return (self.IE.dot(X.T)).T
class rot_precon:
def __init__(self, eta, tau):
c1 = (0-np.conj(tau))/(tau-np.conj(tau)) - eta[0]
phi1 = cmath.polar(c1)[1]
#phi1 = pi/2.0
rot = np.ones((len(eta),), dtype=complex)
for k in range(0,len(eta)):
ck = (0-np.conj(tau))/(tau-np.conj(tau)) - eta[k]
phik = cmath.polar(ck)[1]
rot[k] = np.exp(-1j*(phik-phi1))
self.R = sparse.diags(rot,0)
def solve(self, X):
return (self.R.dot(X.T)).T
# Convert frequencies, damping model
om = np.sqrt(1.0-1j*damping)*(2.0*pi*freq)
Om = sparse.diags(om,0)
tau2 = tau*max((2.0*pi*freq)**2)
if tau.real<0.0:
tau2 = opt_tau_anal( damping, min((2.0*pi*freq)**2), max((2.0*pi*freq)**2) )
tau = np.sqrt(tau2)
#print(tau2, tau)
print( tau/max(om.real) )
eta = om**2/(om**2-tau2)
# Define preconditioners
if not iLU:
P1 = precon( K, C, M, tau, eta, timing=True )
else:
P1 = precon_ilu( K, C, M, tau, eta, fill_factor=fill_factor, timing=True )
if rot:
P2 = rot_precon( eta, tau2 )
else:
P2 = __NoPrecond__()
# Define operator and RHS
A = vG_op( K, C, M, Om, P1 )
B = (b*np.ones((len(freq),1))).T
if plot_ritz:
vectorize_me(A, om**2, tau2, damping)
vectorize_me(A, om**2, tau2, damping, imgtype='eps')
if rot:
vectorize_me(A, om**2, tau2, damping, P2)
vectorize_me(A, om**2, tau2, damping, P2, imgtype='eps')
# Run global GMRES
X0 = np.zeros(B.shape, dtype=complex)
res = convergenceHistory(plot_resnrm=plot_resnrm)
X, info = megmres( A, B, X0=X0, tol=tol, maxit=maxit, M1=P2, callback=res.callback, plot_ritz=plot_ritz )
X = A.resub(X)
# Plot convergence and bounding cirlces
plot_meconvergence(res.resvec)
I = sparse.identity(M.shape[0])
AA = sparse.bmat([[1j*C,K],[I,None]])
BB = sparse.bmat([[M,None],[None,I]])
plot_circles_on_circle( AA, BB, om**2, tau2, damping)
if rot:
plot_circles_on_circle( AA, BB, om**2, tau2, damping, rot=np.diag(P2.R.todense()) )
return X.T, len(res.resvec)
| mit |
cxxgtxy/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py | 72 | 12865 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
| apache-2.0 |
behzadnouri/scipy | scipy/optimize/nonlin.py | 34 | 46681 | r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy._lib.six import callable, exec_, xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getargspec_no_self as _getargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
if norm is None:
self.norm = maxnorm
else:
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
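# Illustrative sketch added for clarity (not part of the original module):
# wrapping a plain dense array with `asjacobian` yields an object whose
# matvec and solve are mutually inverse. The 2x2 matrix is arbitrary.
def _asjacobian_demo():
    J = asjacobian(np.array([[2.0, 1.0], [0.0, 4.0]]))
    v = np.array([1.0, 2.0])
    assert np.allclose(J.matvec(J.solve(v)), v)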
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
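# Illustrative sketch added for clarity (not part of the original module):
# after one `append`, the operator documented above acts as alpha*I + c d^H,
# and `solve` applies its inverse. Sizes and vectors are arbitrary complex
# data.
def _low_rank_matrix_demo():
    rng = np.random.RandomState(1)
    n = 5
    c = rng.rand(n) + 1j*rng.rand(n)
    d = rng.rand(n) + 1j*rng.rand(n)
    v = rng.rand(n) + 1j*rng.rand(n)
    M = LowRankMatrix(2.0, n, c.dtype)
    M.append(c, d)
    dense = 2.0*np.identity(n) + c[:, None]*d[None, :].conj()
    assert np.allclose(M.matvec(v), dense.dot(v))
    assert np.allclose(M.matvec(M.solve(v)), v)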
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
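# Illustrative sketch added for clarity, mirroring the module-level example
# rather than an official test: solve a small nonlinear system with
# `nonlin_solve` and an explicit `BroydenFirst` Jacobian approximation.
def _broyden_first_demo():
    def F(x):
        return np.cos(x) + x[::-1] - [1, 2, 3, 4]
    x = nonlin_solve(F, [1, 1, 1, 1], jacobian=BroydenFirst(), f_tol=1e-12)
    assert np.allclose(np.cos(x) + x[::-1], [1, 2, 3, 4])
    return x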
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by looking for the 'best' solution in the space
    spanned by the last `M` vectors. As a result, only an MxM matrix
    inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
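# Editor's note: a self-contained sketch of the mixing step summarized in the
# note above,
#     J^-1 v ~ -alpha*v + (dX + alpha*dF) A^-1 dF^H v,  A = W + dF^H dF,
#     W = w0^2 diag(dF^H dF),
# written with dense real arrays whose columns are the stored differences.
# The function name and argument layout are illustrative assumptions only.
def _anderson_mixing_step_dense(v, dX, dF, alpha, w0=0.01):
    import numpy as _np
    v = _np.asarray(v, dtype=float)
    dX = _np.asarray(dX, dtype=float)   # shape (n, m)
    dF = _np.asarray(dF, dtype=float)   # shape (n, m)
    A = dF.T.dot(dF)
    A = A + _np.diag(w0**2 * _np.diag(A))      # add W = w0^2 diag(dF^T dF)
    gamma = _np.linalg.solve(A, dF.T.dot(v))   # A^-1 dF^T v
    return -alpha * v + (dX + alpha * dF).dot(gamma)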
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
        Note that you can also use inverse Jacobians as (adaptive)
        preconditioners. For example,
>>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
>>> from scipy.optimize.nonlin import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
doi:10.1016/j.jcp.2003.08.010
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
doi:10.1137/S0895479803422014
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
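# Editor's note: a stand-alone sketch of the finite-difference Jacobian-vector
# product that `matvec` above relies on,
#     J v ~ (f(x + sc*v) - f(x)) / sc,   sc = omega / ||v||.
# `_fd_jacobian_vector` and its default `omega` are illustrative assumptions.
def _fd_jacobian_vector(func, x, v, omega=1e-7):
    import numpy as _np
    x = _np.asarray(x, dtype=float)
    v = _np.asarray(v, dtype=float)
    nv = _np.linalg.norm(v)
    if nv == 0:
        return 0 * v
    sc = omega / nv
    return (func(x + sc * v) - func(x)) / sc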
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
    It inspects the keyword arguments of ``jac.__init__``, and allows the
    same arguments to be used in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`.
"""
args, varargs, varkw, defaults = _getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
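# Editor's note: a small, hedged usage sketch of the wrappers generated above
# (they are also exposed as `scipy.optimize.broyden1`, `scipy.optimize.anderson`,
# `scipy.optimize.newton_krylov`, ...).  The residual below is only an example;
# convergence behaviour depends on the problem and on the solver defaults.
def _example_broyden1_usage():
    import numpy as _np
    def residual(x):
        # decoupled, well-conditioned system: each component solves x + tanh(x) = 1
        return x + _np.tanh(x) - 1.0
    x0 = _np.zeros(3)
    sol = broyden1(residual, x0, f_tol=1e-10)
    return sol, _np.abs(residual(sol)).max()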
| bsd-3-clause |
jakobj/UP-Tasks | NEST/single_neuron_task/single_neuron.py | 3 | 1344 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import nest # import NEST module
def single_neuron(spike_times, sim_duration):
nest.set_verbosity('M_WARNING') # reduce NEST output
nest.ResetKernel() # reset simulation kernel
# create LIF neuron with exponential synaptic currents
neuron = nest.Create('iaf_psc_exp')
# create a voltmeter
voltmeter = nest.Create('voltmeter', params={'interval': 0.1})
# create a spike generator
spikegenerator = nest.Create('spike_generator')
# ... and let it spike at predefined times
nest.SetStatus(spikegenerator, {'spike_times': spike_times})
# connect spike generator and voltmeter to the neuron
nest.Connect(spikegenerator, neuron)
nest.Connect(voltmeter, neuron)
# run simulation for sim_duration
nest.Simulate(sim_duration)
# read out recording time and voltage from voltmeter
times = nest.GetStatus(voltmeter)[0]['events']['times']
voltage = nest.GetStatus(voltmeter)[0]['events']['V_m']
# plot results
plt.plot(times, voltage)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential (mV)')
filename = 'single_neuron.png'
plt.savefig(filename, dpi=300)
if __name__ == '__main__':
spike_times = [10., 50.]
sim_duration = 100.
single_neuron(spike_times, sim_duration)
| gpl-2.0 |
flightgong/scikit-learn | benchmarks/bench_plot_omp_lars.py | 31 | 4457 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
            n_informative = n_features // 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute_gram=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute_gram=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i + 1)  # subplot numbering is 1-based
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/io/test_sql.py | 3 | 96428 | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
from __future__ import print_function
import pytest
import sqlite3
import csv
import warnings
import numpy as np
import pandas as pd
from datetime import datetime, date, time
from pandas.core.dtypes.common import (
is_object_dtype, is_datetime64_dtype,
is_datetime64tz_dtype)
from pandas import DataFrame, Series, Index, MultiIndex, isna, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import range, lrange, string_types, PY36
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol',
'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull',
'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol',
'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull',
'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz',
'IntDateCol', 'IntDateOnlyCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
},
'create_view': {
'sqlite': """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
}
}
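# Editor's note: an illustrative sketch (not used by the tests) of how the
# flavor-keyed templates above are consumed, shown for the 'sqlite' flavor.
def _example_sqlite_template_usage():
    conn = sqlite3.connect(':memory:')
    conn.execute(SQL_STRINGS['create_iris']['sqlite'])
    conn.execute(SQL_STRINGS['insert_iris']['sqlite'],
                 (5.1, 3.5, 1.4, 0.2, 'Iris-setosa'))
    return conn.execute('SELECT count(*) FROM iris').fetchone()[0]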
class MixInBase(object):
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, 'conn'):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_mysql_name(table_name))
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute('SHOW TABLES')
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_sqlite_name(table_name))
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute(
"SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest(object):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[('io', 'data', 'iris.csv')])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, 'conn'):
self.setup_connect()
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table('iris_view')
self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906,
-0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543,
0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26',
'1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'IntDateOnlyCol': 20101010,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'IntDateOnlyCol': 20101212,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
assert self.pandasSQL.has_table('test_frame1')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
assert self.pandasSQL.has_table('test_frame1')
pytest.raises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
assert self.pandasSQL.has_table('test_frame1')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
assert num_rows == num_entries
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
assert self.pandasSQL.has_table('test_frame1')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
assert num_rows == num_entries
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query(
'SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
assert ix_cols == [['A', ], ]
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
except Exception:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
mode = None
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn)
assert sql.has_table('test_frame1', self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, if_exists='fail')
assert sql.has_table('test_frame2', self.conn)
pytest.raises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='replace')
assert sql.has_table('test_frame3', self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='append')
assert sql.has_table('test_frame4', self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5', self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_to_sql_panel(self):
panel = tm.makePanel()
pytest.raises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0)
]
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0)
]
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0)
]
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0)
]
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateOnlyCol': '%Y%m%d'})
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp('2010-10-10'),
pd.Timestamp('2010-12-12')
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(
Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a': [1 + 1j, 2j]})
# Complex data type should raise error
pytest.raises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == 'index'
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "other_label"
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "index_name"
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "other_label"
# index name is integer
temp_frame.index.name = 0
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "0"
temp_frame.index.name = None
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=0)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "0"
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product(
[('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == 'level_0'
assert frame.columns[1] == 'level_1'
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['A', 'B']
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['A', 'B']
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['C', 'D']
# wrong length of index_label
pytest.raises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A', 'B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A', 'B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', con=self.conn)
assert 'CREATE' in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a': [1.1, 1.2], 'b': [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test',
con=self.conn, dtype={'b': dtype})
assert 'CREATE' in create_sql
assert 'INTEGER' in create_sql
def test_get_schema_keys(self):
frame = DataFrame({'Col1': [1.1, 1.2], 'Col2': [2.1, 2.2]})
create_sql = sql.get_schema(frame, 'test', con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=[u'\xe9', u'b'])
df.to_sql('test_unicode', self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
df.to_sql('d1187b08-4943-4c8d-a7f6', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM `d1187b08-4943-4c8d-a7f6`',
self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
    Tests for `read_sql_table` are included here, as this is specific to the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
pytest.skip('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table(
'test_frame', self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table('test_frame', self.conn,
index_col=["A", "B"],
columns=["C", "D"])
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes('test_index_saved')
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
assert isinstance(table.table.c['time'].type, sqltypes.DateTime)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = 'sqlite:///' + name
table = 'iris'
test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = 'SELECT * FROM iris'
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with tm.assert_raises_regex(ImportError, "pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table('iris', metadata,
sa.Column('SepalLength', sa.REAL),
sa.Column('SepalWidth', sa.REAL),
sa.Column('PetalLength', sa.REAL),
sa.Column('PetalWidth', sa.REAL),
sa.Column('Name', sa.TEXT)
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text('select * from iris where name=:name')
iris_df = sql.read_sql(name_text, self.conn, params={
'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
assert all_names == {'Iris-versicolor'}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam('name'))
iris_df = sql.read_sql(name_select, self.conn,
params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
assert all_names == {'Iris-setosa'}
class _EngineToConnMixin(object):
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super(_EngineToConnMixin, self).load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super(_EngineToConnMixin, self).teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy",
conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_con_string_import_error(self):
if not SQLALCHEMY_INSTALLED:
conn = 'mysql://root@localhost/pandas_nosetest'
pytest.raises(ImportError, sql.read_sql, "SELECT * FROM iris",
conn)
else:
pytest.skip('SQLAlchemy is installed')
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
pytest.raises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, 'test')
assert 'CREATE' in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, 'time') == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor = None
@pytest.fixture(autouse=True, scope='class')
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
pytest.skip(msg)
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(
"Can't connect to {0} server".format(self.flavor))
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
assert temp_conn.has_table('temp_frame')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
assert temp_conn.has_table('temp_frame')
pandasSQL.drop_table('temp_frame')
assert not temp_conn.has_table('temp_frame')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
pytest.raises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64': [2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp('2000-01-01 08:00:00')
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp('2000-06-01 07:00:00')
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == 'UTC'
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [Timestamp('2000-01-01 08:00:00', tz='UTC'),
Timestamp('2000-06-01 07:00:00', tz='UTC')]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError("DateCol loaded with incorrect type "
"-> {0}".format(col.dtype))
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, 'DateColWithTz'):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
        # PostgreSQL server version difference
col = df.DateColWithTz
assert (is_object_dtype(col.dtype) or
is_datetime64_dtype(col.dtype) or
is_datetime64tz_dtype(col.dtype))
df = pd.read_sql_query("select * from types_test_data",
self.conn, parse_dates=['DateColWithTz'])
if not hasattr(df, 'DateColWithTz'):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == 'UTC'
check(df.DateColWithTz)
df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
self.conn, chunksize=1)),
ignore_index=True)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == 'UTC'
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'IntDateCol': {'unit': 's'}})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
result = res['a']
expected = to_datetime(df['a'])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == 'sqlite':
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A': [0, 1, 2], 'B': [np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from the table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df,
check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.TEXT)
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables['single_dtype_test'].columns['A'].type
sqltypeb = meta.tables['single_dtype_test'].columns['B'].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notna_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict['Bool'].type, my_type)
assert isinstance(col_dict['Date'].type, sqltypes.DateTime)
assert isinstance(col_dict['Int'].type, sqltypes.Integer)
assert isinstance(col_dict['Float'].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32': Series([V, ], dtype='float32'),
'f64': Series([V, ], dtype='float64'),
'f64_as_f32': Series([V, ], dtype='float64'),
'i32': Series([5, ], dtype='int32'),
'i64': Series([5, ], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
assert (np.round(df['f64'].iloc[0], 14) ==
np.round(res['f64'].iloc[0], 14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
assert str(col_dict['f32'].type) == str(col_dict['f64_as_f32'].type)
assert isinstance(col_dict['f32'].type, sqltypes.Float)
assert isinstance(col_dict['f64'].type, sqltypes.Float)
assert isinstance(col_dict['i32'].type, sqltypes.Integer)
assert isinstance(col_dict['i64'].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name='test_foo_data',
con=connection, if_exists='append')
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({'test_foo_data': [0, 1, 2]}).to_sql(
'test_foo_data', self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = u'Hello, World!'
expected = DataFrame({'spam': [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = 'temp_test'
__table_args__ = {'prefixes': ['TEMPORARY']}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(
sql=sqlalchemy.select([Temporary.spam]),
con=conn,
)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
        pytest.skip(
            "Nested transaction rollbacks don't work with Pandas")
class _TestSQLiteAlchemy(object):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
        # test that no warning is raised for BIGINT (used to support int64), GH7433
df = DataFrame({'a': [1, 2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
assert len(w) == 0
class _TestMySQLAlchemy(object):
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver),
connect_args=cls.connect_args)
@classmethod
def setup_driver(cls):
try:
import pymysql # noqa
cls.driver = 'pymysql'
from pymysql.constants import CLIENT
cls.connect_args = {'client_flag': CLIENT.MULTI_STATEMENTS}
except ImportError:
pytest.skip('pymysql not installed')
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
        # see GH7324. Although it is more of an API test, it is added to the
        # mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy(object):
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2 # noqa
cls.driver = 'psycopg2'
except ImportError:
pytest.skip('psycopg2 not installed')
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in
        # mysql/sqlite)
df = DataFrame({'col1': [1, 2], 'col2': [
0.1, 0.2], 'col3': ['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
pytest.raises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table(
'test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='append')
res1 = sql.read_sql_table(
'test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
@pytest.mark.single
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
assert self.pandasSQL.has_table('drop_test_frame')
self.pandasSQL.drop_table('drop_test_frame')
assert not self.pandasSQL.has_table('drop_test_frame')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_time', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
if PY36:
pytest.skip("not working on python > 3.5")
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type(
'dtype_test', 'B') == 'INTEGER'
assert self._get_sqlite_column_type(
'dtype_test2', 'B') == 'STRING'
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype='STRING')
assert self._get_sqlite_column_type(
'single_dtype_test', 'A') == 'STRING'
assert self._get_sqlite_column_type(
'single_dtype_test', 'B') == 'STRING'
def test_notna_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notna_dtype_test'
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, 'Bool') == 'INTEGER'
assert self._get_sqlite_column_type(tbl, 'Date') == 'TIMESTAMP'
assert self._get_sqlite_column_type(tbl, 'Int') == 'INTEGER'
assert self._get_sqlite_column_type(tbl, 'Float') == 'REAL'
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
pytest.raises(ValueError, df.to_sql, "", self.conn)
for ndx, weird_name in enumerate(
['test_weird_name]', 'test_weird_name[',
'test_weird_name`', 'test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'99beginswithnumber', '12345', u'\xe9']):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d' % ndx
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime('%Y%m%d')
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
    """Format a SQL statement, rendering each positional argument with the
    type-appropriate formatter from ``_formatters`` (strings quoted, floats
    fixed-precision, None rendered as NULL).
    """
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
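# Illustrative example (added for clarity; not part of the original tests).
# Given the _formatters above, a call such as
#   format_query("INSERT INTO test VALUES (%s, %s, %s)", 1.5, 'foo', None)
# produces
#   "INSERT INTO test VALUES (1.50000000, 'foo', NULL)"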
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
def _skip_if_no_pymysql():
try:
import pymysql # noqa
except ImportError:
pytest.skip('pymysql not installed, skipping')
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
self.method = request.function
self.conn = sqlite3.connect(':memory:')
# In some test cases we may close db connection
# Re-open conn here so we can perform cleanup in teardown
yield
self.method = request.function
self.conn = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
@tm.capture_stdout
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.to_sql(frame2, name='test_table2', con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords', index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=['c0'])
sql.to_sql(mono_df, con=self.conn, name='mono_df', index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum(my_c0[0]
for my_c0 in con_x.execute("select * from mono_df"))
        # it should not fail, and gives 3 (issue #3628)
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn,
name=table_name, if_exists='fail')
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
@pytest.mark.single
@pytest.mark.skip(reason="gh-13611: there is no support for MySQL "
"if SQLAlchemy is not installed")
class TestXMySQL(MySQLMixIn):
@pytest.fixture(autouse=True, scope='class')
def setup_class(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
pytest.skip(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
pytest.skip(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root',
passwd='', db='pandas_nosetest')
except:
pass
else:
return
try:
self.conn = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
pytest.skip(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
pytest.skip(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
self.method = request.function
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_chunksize_read_type(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name='test', con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = pd.read_sql_query(sql=query, con=self.conn,
chunksize=chunksize, index_col="index")
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.iloc[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
assert 'PRIMARY KEY (`A`, `B`)' in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
@tm.capture_stdout
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self, request, datapath):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(request, datapath)
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame2, name='test_table2',
con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
_skip_if_no_pymysql()
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords',
if_exists='replace', index=False)
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
| bsd-3-clause |
RayMick/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
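# Added note: l1, l2 and el trace the boundaries of the unit balls of the
# corresponding penalties; el(x, z) is the closed-form solution for the second
# coordinate of the elastic-net boundary at mixing parameter z (obtained by
# solving the constraint as a quadratic in that coordinate). As a quick sanity
# check, el(0, z) == 1 and el(1, z) == 0, so the curve joins (0, 1) to (1, 0)
# just like the L1 and L2 boundaries.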
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause division by zero in el() (denominator 2 - 4 * z)
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/neural_network/rbm.py | 26 | 12280 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from scipy.special import expit # logistic function
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
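    # Added note: for a Bernoulli RBM with energy
    #   E(v, h) = -b'v - c'h - h'Wv,
    # summing out the hidden units gives the closed form used above:
    #   F(v) = -b'v - sum_j log(1 + exp(c_j + W_j . v)),
    # where b = intercept_visible_, c = intercept_hidden_ and W = components_.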
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
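    # Added note: the update above is the Stochastic Maximum Likelihood /
    # Persistent Contrastive Divergence gradient estimate,
    #   dW ~ E_data[h v'] - E_model[h v'],
    # where the data term uses the mean hidden activations of the minibatch
    # (h_pos) and the model term uses fantasy particles from the persistent
    # Gibbs chain (v_neg, h_neg); the bias terms are updated analogously.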
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
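    # Added note: the returned value is a stochastic proxy for the
    # pseudo-likelihood. With one randomly corrupted feature per sample,
    #   PL(v) ~= n_features * log sigmoid(F(v_corrupted) - F(v)),
    # which avoids computing the intractable partition function.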
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e328.py | 2 | 6737 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 1800, 1800],
min_off_durations=[12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.5,
one_target_per_seq=True,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=0,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
25: 5e-4,
100: 1e-4
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# plotter=MDNPlotter
)
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
# {
# 'type': FeaturePoolLayer,
# 'ds': 2, # number of feature maps to be pooled together
# 'axis': 1, # pool over the time axis
# 'pool_function': T.max
# },
# {
# 'type': BidirectionalRecurrentLayer,
# 'num_units': N,
# 'gradient_steps': GRADIENT_STEPS,
# 'W_in_to_hid': Normal(std=1/sqrt(N)),
# 'nonlinearity': tanh
# },
{
'type': DenseLayer,
'num_units': source.n_outputs,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': T.nnet.softplus
}
# {
# 'type': MixtureDensityLayer,
# 'num_units': source.n_outputs,
# 'num_components': 1,
# 'nonlinearity_mu': T.nnet.softplus
# }
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
kdebrab/pandas | pandas/tests/extension/base/groupby.py | 3 | 2747 | import pytest
import pandas.util.testing as tm
import pandas as pd
from .base import BaseExtensionTests
class BaseGroupbyTests(BaseExtensionTests):
"""Groupby-specific tests."""
def test_grouping_grouper(self, data_for_grouping):
df = pd.DataFrame({
"A": ["B", "B", None, None, "A", "A", "B", "C"],
"B": data_for_grouping
})
gr1 = df.groupby("A").grouper.groupings[0]
gr2 = df.groupby("B").grouper.groupings[0]
tm.assert_numpy_array_equal(gr1.grouper, df.A.values)
tm.assert_extension_array_equal(gr2.grouper, data_for_grouping)
@pytest.mark.parametrize('as_index', [True, False])
def test_groupby_extension_agg(self, as_index, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
"B": data_for_grouping})
result = df.groupby("B", as_index=as_index).A.mean()
_, index = pd.factorize(data_for_grouping, sort=True)
# TODO(ExtensionIndex): remove astype
index = pd.Index(index.astype(object), name="B")
expected = pd.Series([3, 1, 4], index=index, name="A")
if as_index:
self.assert_series_equal(result, expected)
else:
expected = expected.reset_index()
self.assert_frame_equal(result, expected)
def test_groupby_extension_no_sort(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
"B": data_for_grouping})
result = df.groupby("B", sort=False).A.mean()
_, index = pd.factorize(data_for_grouping, sort=False)
# TODO(ExtensionIndex): remove astype
index = pd.Index(index.astype(object), name="B")
expected = pd.Series([1, 3, 4], index=index, name="A")
self.assert_series_equal(result, expected)
def test_groupby_extension_transform(self, data_for_grouping):
valid = data_for_grouping[~data_for_grouping.isna()]
df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4],
"B": valid})
result = df.groupby("B").A.transform(len)
expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', [
lambda x: 1,
lambda x: [1] * len(x),
lambda x: pd.Series([1] * len(x)),
lambda x: x,
], ids=['scalar', 'list', 'series', 'object'])
def test_groupby_extension_apply(self, data_for_grouping, op):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
"B": data_for_grouping})
df.groupby("B").apply(op)
df.groupby("B").A.apply(op)
df.groupby("A").apply(op)
df.groupby("A").B.apply(op)
| bsd-3-clause |
ArtezGDA/MappingTheCity-Maps | Kimberley ter Heerdt/Poster/Visual-3/wiki-birthsvisualmetdatatekst.py | 1 | 1087 | import json
import matplotlib.pyplot as plt
def visual_file(file_name, line_color):
fig = plt.figure(1)
with open(file_name, 'r') as f:
data = json.load(f)
for d in data:
cur_births = d['birth']
for cur_birth in cur_births:
year = cur_birth['year']
deathyear = cur_birth['deathyear']
if deathyear:
radius = int(deathyear) - int(year)
else:
radius = 2016 - int(year)
ax = fig.add_subplot(1, 1, 1)
            circle = plt.Circle((year, 0), radius=radius, color=line_color, fill=False)
            ax.add_patch(circle)
plt.xlim(800, 2100)
plt.xticks([i for i in range(800, 2016, 50)])
plt.ylim(0, 150)
plt.xlabel('January', fontsize=17)
plt.yticks([])
plt.subplots_adjust(left=0.01, right=0.99, top=0.58, bottom=0.4)
plt.show()
if __name__ == '__main__':
file_name = 'wikibirth-jan.json'
line_color = 'gray'
visual_file(file_name, line_color)
| mit |
skrueger111/zazzie | src/scripts/convergence_test.py | 3 | 31326 | # from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# # from __future__ import unicode_literals
"""SASSIE: Copyright (C) 2011-2015 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import glob
import logging
import numpy
import os
import pandas
import time
import sasmol.sasmol as sasmol
# allows for creating plots without an xserver
try:
dummy = os.environ["DISPLAY"]
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# convergence_test
#
# 08/24/2016 -- updating for github repo : sch
#
# 1 2 3 4 5 6 7
# 34567890123456789012345678901234567890123456789012345678901234567890123456789
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class AlignInputs(object):
def __init__(self, goal_pdb, move, ref_pdb, out_fname, **kwargs):
self.goal_pdb = goal_pdb
self.ref_pdb = ref_pdb
self.move = move
self.out_fname = out_fname
self.path = kwargs.get('path', './')
self.basis_atoms = kwargs.get('basis_atoms', 'CA')
self.seg_or_chain = kwargs.get('seg_or_chain', 'segname')
self.seg_chain = kwargs.get('seg_chain', 'GAG')
self.min_resid = kwargs.get('min_resid', 20)
self.max_resid = kwargs.get('max_resid', 30)
default_filter = (
'(({}[i] == "{}") and (name[i] == "{}") and '
'(resid[i] >= {}) and (resid[i] <= {}))'.format(
self.seg_or_chain,
self.seg_chain,
self.basis_atoms,
self.min_resid,
self.max_resid))
self.goal_filter = kwargs.get('goal_filter', default_filter)
self.move_filter = kwargs.get('move_filter', default_filter)
logging.debug('goal_pdb: {}'.format(self.goal_pdb))
logging.debug('ref_pdb: {}'.format(self.ref_pdb))
logging.debug('move: {}'.format(self.move))
logging.debug('out_fname: {}'.format(self.out_fname))
logging.debug('path: {}'.format(self.path))
logging.debug('goal_filter: {}'.format(self.goal_filter))
logging.debug('move_filter: {}'.format(self.move_filter))
def align(inputs):
'''
input:
------
inputs: object should contain the following attributes
goal: goal pdb
ref: reference pdb containing molecule info for moving pdb/dcd
move: pdb/dcd to align
out: output dcd file
path: output path
goal_filter: goal basis filter
move_filter: move basis filter
    note: inputs.ref and inputs.move can often be the same pdb
'''
aa_goal_pdb = inputs.goal_pdb
aa_move_pdb = inputs.ref_pdb
aa_move_fname = inputs.move
save_fname = inputs.out_fname
path = inputs.path
    if save_fname == aa_move_fname:
        in_place = True
        save_fname = 'temp' + save_fname[-4:]
    else:
        in_place = False
    try:
        goal_filter = inputs.goal_filter
    except AttributeError:
        basis_atoms = inputs.basis_atoms
        goal_seg_or_ch = inputs.goal_seg_or_chain
        goal_segname = inputs.goal_seg_chain
        goal_res_max = inputs.goal_max
        goal_res_min = inputs.goal_min
        goal_filter = ('((%s[i] == "%s") and (name[i] == "%s") and '
                       '(resid[i] >= %s) and (resid[i] <= %s))' % (
                           goal_seg_or_ch, goal_segname, basis_atoms,
                           goal_res_min, goal_res_max))
    try:
        move_filter = inputs.move_filter
    except AttributeError:
        basis_atoms = inputs.basis_atoms
        move_seg_or_ch = inputs.move_seg_or_chain
        move_segname = inputs.move_seg_chain
        move_res_max = inputs.move_max
        move_res_min = inputs.move_min
        move_filter = ('((%s[i] == "%s") and (name[i] == "%s") and '
                       '(resid[i] >= %s) and (resid[i] <= %s))' % (
                           move_seg_or_ch, move_segname, basis_atoms,
                           move_res_min, move_res_max))
# check input
assert os.path.exists(aa_move_fname), ('ERROR: no such file - %s' %
aa_move_fname)
assert os.path.exists(aa_move_pdb), ('ERROR: no such file - %s' %
aa_move_pdb)
assert os.path.exists(aa_goal_pdb), ('ERROR: no such file - %s' %
aa_goal_pdb)
# create the SasMol objects
sub_goal = sasmol.SasMol(0)
sub_move = sasmol.SasMol(0)
aa_goal = sasmol.SasMol(0)
aa_move = sasmol.SasMol(0)
aa_goal.read_pdb(aa_goal_pdb)
aa_move.read_pdb(aa_move_pdb)
if aa_move_fname[-3:] == 'pdb':
aa_move.read_pdb(aa_move_fname)
n_frames = aa_move.number_of_frames()
in_type = 'pdb'
elif aa_move_fname[-3:] == 'dcd':
dcd_file = aa_move.open_dcd_read(aa_move_fname)
n_frames = dcd_file[2]
in_type = 'dcd'
    else:
        message = "\n~~~ ERROR, unknown input type ~~~\n"
        logging.error(message)
        return
out_type = save_fname[-3:].lower()
if 'dcd' == out_type:
dcd_out_file = aa_move.open_dcd_write(path + save_fname)
elif 'pdb' == out_type:
dcd_out_file = None
error, goal_seg_mask = aa_goal.get_subset_mask(goal_filter)
assert not error, error
error, move_seg_mask = aa_move.get_subset_mask(move_filter)
assert not error, error
error = aa_goal.copy_molecule_using_mask(sub_goal, goal_seg_mask, 0)
assert not error, error
error = aa_move.copy_molecule_using_mask(sub_move, move_seg_mask, 0)
assert not error, error
# calculate the center of mass of the subset of m1
com_sub_goal = sub_goal.calccom(0)
sub_goal.center(0) # center the m1 coordinates
# get the m1 centered coordinates
coor_sub_goal = sub_goal.coor()[0]
for i in xrange(n_frames):
if in_type == 'dcd':
aa_move.read_dcd_step(dcd_file, i)
# move m2 to be centered at the origin
aa_move.center(0)
error, sub_move.coor = aa_move.get_coor_using_mask(
0, move_seg_mask)
sub_move.setCoor(sub_move.coor)
# calculate the center of mass of the subset of m2
com_sub_move = sub_move.calccom(0)
# move the subset of m2 to be centered at the origin
sub_move.center(0)
# get the new coordinates of the subset of m2
coor_sub_move = sub_move.coor[0]
# align m2 using the transformation from sub_m2 to sub_m1
aa_move.align(
0, coor_sub_move, com_sub_move, coor_sub_goal, com_sub_goal)
elif in_type == 'pdb':
# move m2 to be centered at the origin
aa_move.center(i)
error, sub_move.coor = aa_move.get_coor_using_mask(
i, move_seg_mask)
sub_move.setCoor(sub_move.coor)
# calculate the center of mass of the subset of m2
com_sub_move = sub_move.calccom(0)
# move the subset of m2 to be centered at the origin
sub_move.center(0)
# get the new coordinates of the subset of m2
coor_sub_move = sub_move.coor[0]
# align m2 using the transformation from sub_m2 to sub_m1
aa_move.align(
i, coor_sub_move, com_sub_move, coor_sub_goal, com_sub_goal)
aa_move.write_dcd_step(dcd_out_file, 0, i + 1)
if in_type == 'dcd':
aa_move.close_dcd_read(dcd_file[0])
if out_type == 'dcd':
aa_move.close_dcd_write(dcd_out_file)
if in_place:
os.remove(aa_move_fname)
os.rename(save_fname, aa_move_fname)
    logging.info('Alignment of {} complete. \m/ >.< \m/'.format(aa_move_fname))
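# Minimal usage sketch for align() above (file names are hypothetical; the
# basis filters fall back to the AlignInputs defaults):
#     inputs = AlignInputs('goal.pdb', 'trajectory.dcd', 'ref.pdb',
#                          'aligned.dcd', path='./')
#     align(inputs)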
def calc_sas_convergence_all(sas_folders, output_prefix=None,
granularity=int(1e3), show=False, sas_ext='iq'):
assert len(sas_folders) == 1, ("ERROR: mode for examining multiple "
"folders not currently tested")
if not output_prefix:
output_prefix = 'sas_convergence'
# initialize data sets
iq_all = []
list_new_grids = []
list_occupied_grids = []
n_q, n_spec = load_iq(sas_folders, sas_ext, iq_all)
count_sas_grids(sas_folders, iq_all, n_q, n_spec,
list_new_grids, list_occupied_grids, granularity)
total_spec = n_spec.sum()
new_grids = numpy.zeros((total_spec, len(sas_folders) + 1))
new_grids[:, 0] = numpy.arange(total_spec)
occupied_grids = numpy.copy(new_grids)
for i in xrange(len(sas_folders)):
rows = list_new_grids[i][:, 0] - 1
new_grids[rows, 1] = list_new_grids[i][:, 1]
occupied_grids[rows, 1] = list_occupied_grids[i][:, 1]
# create output text files
fname_occupied_grids = output_prefix + '_occupied_grids.npy'
fname_new_grids = output_prefix + '_new_grids.npy'
numpy.savetxt(fname_occupied_grids, occupied_grids)
numpy.savetxt(fname_new_grids, new_grids)
print 'output text files: \n%s \n%s' % (fname_occupied_grids,
fname_new_grids)
plot_convergence(new_grids, sas_folders, occupied_grids,
output_prefix, show, spatial=False)
def calc_sas_convergence_by_run(sas_folders, output_prefix=None,
granularity=int(1e3), show=False, sas_ext='iq'):
assert len(sas_folders) == 1, ("ERROR: mode for examining multiple "
"folders not currently tested")
if not output_prefix:
output_prefix = 'sas_convergence'
# initialize data sets
iq_all = []
list_new_grids = []
list_occupied_grids = []
n_q, n_spec = load_iq(sas_folders, sas_ext, iq_all)
count_sas_grids(sas_folders, iq_all, n_q, n_spec,
list_new_grids, list_occupied_grids, granularity)
total_spec = n_spec.sum()
new_grids = numpy.zeros((total_spec, len(sas_folders) + 1), dtype=int)
new_grids[:, 0] = numpy.arange(total_spec)
occupied_grids = numpy.copy(new_grids)
for i in xrange(len(sas_folders)):
rows = list_new_grids[i][:, 0] - 1
new_grids[rows, i + 1] = list_new_grids[i][:, 1]
occupied_grids[rows, i + 1] = list_occupied_grids[i][:, 1]
# create output text files
fname_occupied_grids = output_prefix + '_occupied_grids_by_run.npy'
fname_new_grids = output_prefix + '_new_grids_by_run.npy'
numpy.savetxt(fname_occupied_grids, occupied_grids)
numpy.savetxt(fname_new_grids, new_grids)
print 'output text files: \n%s \n%s' % (fname_occupied_grids,
fname_new_grids)
plot_convergence(new_grids, sas_folders, occupied_grids,
output_prefix, show, spatial=False)
def calc_spatial_convergence_all(pdb_fname, dcd_fnames, output_prefix=None,
show=False, **kwargs):
assert len(dcd_fnames) == 1, ("ERROR: mode for examining multiple "
"dcd files not currently tested")
if not output_prefix:
output_prefix = pdb_fname[:-4]
# initialize data sets
list_new_voxels = []
list_occupied_voxels = []
count_spatial_voxels(pdb_fname, dcd_fnames, list_new_voxels,
list_occupied_voxels, **kwargs)
n_structures = sum([len(new_voxels) for new_voxels in list_new_voxels])
new_voxels = numpy.empty((n_structures, 2))
occupied_voxels = numpy.empty((n_structures, 2))
new_voxels[:, 0] = numpy.arange(n_structures)
occupied_voxels[:, 0] = numpy.arange(n_structures)
for i in xrange(len(dcd_fnames)):
rows = list_new_voxels[i][:, 0] - 1
new_voxels[rows, 1] = list_new_voxels[i][:, 1]
occupied_voxels[rows, 1] = list_occupied_voxels[i][:, 1]
# create output text files
fname_occupied_voxels = output_prefix + '_occupied_voxels.npy'
fname_new_voxels = output_prefix + '_new_voxels.npy'
numpy.savetxt(fname_occupied_voxels, occupied_voxels)
numpy.savetxt(fname_new_voxels, new_voxels)
print 'output text files: \n%s \n%s' % (fname_occupied_voxels,
fname_new_voxels)
plot_convergence(new_voxels, dcd_fnames, occupied_voxels,
output_prefix, show)
def calc_spatial_convergence_by_run(pdb_fname, dcd_fnames, output_prefix=None,
show=False, **kwargs):
    assert len(dcd_fnames) == 1, ("ERROR: mode for examining multiple "
                                  "dcd files not currently tested")
    if not output_prefix:
        output_prefix = pdb_fname[:-4]
# initialize data sets
list_new_voxels = []
list_occupied_voxels = []
count_spatial_voxels(pdb_fname, dcd_fnames, list_new_voxels,
list_occupied_voxels, **kwargs)
n_structures = sum([len(new_voxels) for new_voxels in list_new_voxels])
new_voxels = numpy.empty((n_structures, len(dcd_fnames) + 1))
occupied_voxels = numpy.empty((n_structures, len(dcd_fnames) + 1))
new_voxels[:, 0] = numpy.arange(n_structures)
occupied_voxels[:, 0] = numpy.arange(n_structures)
for i in xrange(len(dcd_fnames)):
rows = list_new_voxels[i][:, 0] - 1
new_voxels[rows, i + 1] = list_new_voxels[i][:, 1]
occupied_voxels[rows, i + 1] = list_occupied_voxels[i][:, 1]
# create output text files
fname_occupied_voxels = output_prefix + '_occupied_voxels_by_run.npy'
fname_new_voxels = output_prefix + '_new_voxels_by_run.npy'
numpy.savetxt(fname_occupied_voxels, occupied_voxels)
numpy.savetxt(fname_new_voxels, new_voxels)
print 'output text files: \n%s \n%s' % (fname_occupied_voxels,
fname_new_voxels)
plot_convergence(new_voxels, dcd_fnames, occupied_voxels,
output_prefix, show)
def count_new_spatial_voxels(coors, voxel_set, delta):
number_new_voxels = 0
for coor in coors:
voxel_number = get_spatial_voxel_number(coor, delta)
if voxel_number not in voxel_set:
number_new_voxels += 1
voxel_set.add(voxel_number)
return number_new_voxels
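# Quick illustration of the voxel bookkeeping above (values are made up):
#     voxels = set()
#     count_new_spatial_voxels([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], voxels, 5.0)
#     # -> returns 1: both coordinates land in voxel (0, 0, 0)
#     count_new_spatial_voxels([[7.0, 0.0, 0.0]], voxels, 5.0)
#     # -> returns 1: voxel (1, 0, 0) is new, so len(voxels) is now 2
# Note that get_spatial_voxel_number() (defined below) truncates toward zero,
# so coordinates just either side of the origin share a bin.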
def count_sas_grids(sas_folders, iq_all, n_q, n_spec, list_new_grids,
list_occupied_grids, granularity=int(1e3), iq_low=0,
iq_high=2):
den = float(iq_high - iq_low)
delta_i = 1.0 / granularity # using I(0) = 1 as the default
number_of_occupied_grids = 0
cwd = os.getcwd()
tic = time.time()
for (i_folder, this_folder) in enumerate(sas_folders):
logging.info('processing spec files from: {}\n'.format(this_folder))
output_prefix = os.path.join(cwd, this_folder, '{}_of_{}'.format(
i_folder + 1, len(sas_folders)))
output_new_grids = output_prefix + '_new_grids.npy'
output_occupied_grids = output_prefix + '_occupied_grids.npy'
try:
# try loading output from previous run
this_folder_new_grids = numpy.load(output_new_grids)
this_folder_occupied_grids = numpy.load(output_occupied_grids)
logging.info('Successfully loaded new voxels and occupied '
'voxels for {} from:\n{} \n{}'.format(
this_folder, output_new_grids,
output_occupied_grids))
except:
# calculate and create output
logging.info('Calculating convergence. Did not find output '
'files from previous calculation. Storing the output '
'to:\n%s \n%s' % (output_new_grids,
output_occupied_grids))
this_folder_new_grids = numpy.zeros(
(n_spec[i_folder], 2), dtype=int)
this_folder_new_grids[:, 0] = numpy.arange(n_spec[i_folder]) + 1
this_folder_occupied_grids = numpy.copy(this_folder_new_grids)
occupied_grids = {}
# convert I(Q) to bin number
binned_iqs = numpy.array(
(iq_all[i_folder] - 1.0) / delta_i, dtype=int)
for i_spec in xrange(n_spec[i_folder]):
number_of_new_grids = 0
for q in xrange(n_q):
grids_this_q = occupied_grids.get(q, {})
if not grids_this_q.get(binned_iqs[i_spec, q], 0):
grids_this_q[binned_iqs[i_spec, q]] = 1
number_of_new_grids += 1
occupied_grids[q] = grids_this_q
number_of_occupied_grids += number_of_new_grids
this_folder_occupied_grids[
i_spec, 1] = number_of_occupied_grids
this_folder_new_grids[i_spec, 1] = number_of_new_grids
# print "temporarily not saving output"
numpy.save(output_new_grids, this_folder_new_grids)
numpy.save(output_occupied_grids, this_folder_occupied_grids)
list_new_grids.append(this_folder_new_grids)
list_occupied_grids.append(this_folder_occupied_grids)
toc = time.time() - tic
logging.info("time used: {}".format(toc))
def old_count_sas_grids(sas_folders, iq_low, iq_high, iq_all, n_q, n_spec,
list_new_grids, list_occupied_grids, n_grids):
iq_low = numpy.array(iq_low).min(axis=0)
iq_high = numpy.array(iq_high).max(axis=0)
grid = numpy.zeros((n_q, n_grids + 1))
number_of_occupied_grids = 0
i_spec = 0
cwd = os.getcwd()
tic = time.time()
for (i_folder, this_folder) in enumerate(sas_folders):
print 'processing spec files from: %s\n' % this_folder
output_prefix = os.path.join(cwd, this_folder, '%d_of_%d' %
(i_folder + 1, len(sas_folders)))
output_new_grids = output_prefix + '_new_grids.npy'
output_occupied_grids = output_prefix + '_occupied_grids.npy'
try:
# try loading output from previous run
this_folder_new_grids = numpy.load(output_new_grids)
this_folder_occupied_grids = numpy.load(output_occupied_grids)
print('Successfully loaded new voxels and occupied voxels '
'for %s from:\n%s \n%s' % (this_folder,
output_new_grids,
output_occupied_grids))
except:
# calculate and create output
print('Calculating convergence. Did not find output files from '
'previous calculation. Storing the output to:\n%s \n%s' % (
output_new_grids,
output_occupied_grids))
this_folder_new_grids = numpy.zeros((n_spec[i_folder], 2),
dtype=int)
this_folder_occupied_grids = numpy.zeros((n_spec[i_folder], 2),
dtype=int)
for i_spec_folder in xrange(n_spec[i_folder]):
number_of_new_grids = 0
for q in xrange(n_q):
num = iq_all[i_folder][q, i_spec_folder] - iq_low[q]
den = iq_high[q] - iq_low[q]
try:
n = int(n_grids * (num / den))
except ValueError:
n = int(numpy.nan_to_num(n_grids * (num / den)))
if not grid[q, n]:
grid[q, n] = 1
number_of_new_grids += 1
number_of_occupied_grids += number_of_new_grids
this_folder_new_grids[i_spec_folder, :] = [
i_spec, number_of_new_grids]
this_folder_occupied_grids[i_spec_folder, :] = [
i_spec, number_of_occupied_grids]
i_spec += 1
numpy.save(output_new_grids, this_folder_new_grids)
numpy.save(output_occupied_grids, this_folder_occupied_grids)
list_new_grids.append(this_folder_new_grids)
list_occupied_grids.append(this_folder_occupied_grids)
toc = time.time() - tic
print "time used: ", toc
def count_spatial_voxels(pdb_fname, dcd_fnames, list_new_voxels,
list_occupied_voxels, voxel_size=5.0,
basis_filter=None, filter_label='', align_dcd=False,
**kwargs):
# initialize molecule and mask
mol = sasmol.SasMol(0)
mol.read_pdb(pdb_fname)
n_dcds = len(dcd_fnames)
cap_filter = '(name[i]=="CA" or name[i]=="P")'
if basis_filter:
error, mask = mol.get_subset_mask('%s and %s' % (
basis_filter, cap_filter))
else:
error, mask = mol.get_subset_mask(cap_filter)
assert not error, error
voxel_set = set([])
number_occupied_voxels = 0
tic = time.time()
for (i_dcd, dcd_fname) in enumerate(dcd_fnames):
print 'processing dcd: %s\n' % dcd_fname
dcd_output_prefix = '%s_%d_of_%d' % (dcd_fname[:-4], i_dcd + 1,
n_dcds)
output_new_voxels = '%s%s_new_voxels.npy' % (
dcd_output_prefix, filter_label)
output_occupied_voxels = '%s%s_occupied_voxels.npy' % (
dcd_output_prefix, filter_label)
try:
# try loading output from previous run
this_dcd_new_voxels = numpy.load(output_new_voxels)
this_dcd_occupied_voxels = numpy.load(output_occupied_voxels)
print('Successfully loaded new voxels and occupied voxels '
'for %s from:\n%s \n%s' % (dcd_fname,
output_new_voxels,
output_occupied_voxels))
except:
# calculate and create output
print('Calculating convergence. Did not find output files from '
'previous calculation. Storing the output to:\n%s \n%s' % (
output_new_voxels,
output_occupied_voxels))
if align_dcd:
inputs = AlignInputs(pdb_fname, dcd_fname, pdb_fname,
dcd_fname, **kwargs)
align(inputs)
dcd_file = mol.open_dcd_read(dcd_fname)
number_of_frames = dcd_file[2]
this_dcd_new_voxels = numpy.empty((number_of_frames, 2), dtype=int)
this_dcd_new_voxels[:, 0] = numpy.arange(number_of_frames) + 1
this_dcd_occupied_voxels = numpy.copy(this_dcd_new_voxels)
for nf in xrange(number_of_frames):
mol.read_dcd_step(dcd_file, nf)
error, coor = mol.get_coor_using_mask(0, mask)
assert not error, error
number_new_voxels = count_new_spatial_voxels(
coor[0], voxel_set, voxel_size)
number_occupied_voxels += number_new_voxels
this_dcd_occupied_voxels[nf, 1] = number_occupied_voxels
this_dcd_new_voxels[nf, 1] = number_new_voxels
numpy.save(output_new_voxels, this_dcd_new_voxels)
numpy.save(output_occupied_voxels, this_dcd_occupied_voxels)
list_new_voxels.append(this_dcd_new_voxels)
list_occupied_voxels.append(this_dcd_occupied_voxels)
toc = time.time() - tic
logging.info("time used: {}".format(toc))
def get_spatial_voxel_number(coor, delta):
idx = int(coor[0] / delta)
idy = int(coor[1] / delta)
idz = int(coor[2] / delta)
return (idx, idy, idz)
def load_iq(sas_folders, sas_ext, iq_all):
n_folders = len(sas_folders)
n_q = numpy.empty(n_folders, dtype=int)
n_spec = numpy.empty(n_folders, dtype=int)
cwd = os.getcwd()
for (i_folder, this_folder) in enumerate(sas_folders):
logging.info('loading spec files from: {}'.format(this_folder))
output_prefix = os.path.join(cwd, this_folder, '{}_of_{}'.format(
i_folder + 1, n_folders))
output_iq = output_prefix + '_iq.h5'
sas_search_path = os.path.join(cwd, this_folder, '*.' + sas_ext)
file_list = glob.glob(sas_search_path)
n_spec[i_folder] = len(file_list)
if n_spec[i_folder] < 1:
logging.info('No I(Q) files found in: {}'.format(sas_search_path))
else:
try:
# try loading iq_array from previous run
store = pandas.HDFStore(output_iq)
these_iqs_df = store['iq']
q_vals = store['q']
n_q[i_folder] = len(q_vals)
these_iqs = numpy.array(these_iqs_df)
logging.info(
'Successfully loaded iq_array for {} from:\n{}'.format(
this_folder, output_iq))
except:
logging.info(
'Loading in iq data from {}. Output stored to:\n{}'.format(
this_folder, output_iq))
file_list.sort()
ref_iq = numpy.loadtxt(file_list[0])
q_vals = pandas.Series(ref_iq[:, 0])
n_q[i_folder] = len(q_vals)
these_iqs = numpy.empty((n_spec[i_folder], n_q[i_folder]))
for (j, this_file) in enumerate(file_list):
this_iq = numpy.loadtxt(this_file)
if not numpy.all(0.0 == (this_iq[:, 0] - q_vals)):
logging.error(
                            'Q values do not match for iq file: {0}'.format(this_file))
these_iqs[j] = this_iq[:, 1] / this_iq[0, 1] # I(0) = 1
these_iqs_df = pandas.DataFrame(these_iqs, columns=q_vals)
store['iq'] = these_iqs_df
store['q'] = q_vals
store.close()
iq_all.append(these_iqs)
assert n_q[i_folder] == n_q[0], (
'ERROR: inconsistent number of Q-grid points between spec '
'files in %s and %s' % (sas_folders[0], this_folder)
)
n_q = n_q[0]
return n_q, n_spec
def plot_convergence(new_voxels, dcd_fnames, occupied_voxels,
output_prefix, show=False, spatial=True):
fig = plt.figure(figsize=(6, 10))
gs = gridspec.GridSpec(2, 1, left=0.1, right=0.9, wspace=0, hspace=0)
ax = []
ax.append(plt.subplot(gs[0]))
ax.append(plt.subplot(gs[1]))
n_plots = new_voxels.shape[1] - 1
for i in xrange(n_plots):
if 1 < n_plots < 100:
label = dcd_fnames[i]
else:
label = ''
if i > 0:
# rows = (new_voxels[:, i+1] > 0)
ax[0].plot(new_voxels[1:, 0], new_voxels[1:, i + 1],
label=label)
else:
# rows = (new_voxels[:, i+1] > 0)[1:] # skip the initial frame
ax[0].plot(new_voxels[1:, 0], new_voxels[1:, i + 1],
label=label)
ax[0].xaxis.set_ticklabels([])
if n_plots > 1:
lg = ax[0].legend(bbox_to_anchor=(1, 1), loc=2)
# lg.draw_frame(False)
for i in xrange(n_plots):
if i > 0:
rows = (occupied_voxels[:, i + 1] > 0) # only plot non-zero values
ax[1].plot(occupied_voxels[rows, 0], occupied_voxels[rows, i + 1])
else:
rows = (
occupied_voxels[
:,
i +
1] > 0)[
1:] # skip the initial frame
ax[1].plot(occupied_voxels[rows, 0], occupied_voxels[rows, i + 1])
ax[1].set_xlabel('Structures')
ylim = ax[1].get_ylim()
ax[1].set_ylim((ylim[0], ylim[1] * 1.1))
if spatial:
ax[1].set_ylabel('Number of Occupied Voxels')
ax[0].set_ylabel('Number of New Voxels')
else:
ax[1].set_ylabel('Number of Occupied Grids')
ax[0].set_ylabel('Number of New Grids')
plot_name = output_prefix + '_convergence'
plot_name = os.path.join(os.getcwd(), plot_name)
plt.savefig(plot_name + '.eps', dpi=400, bbox_inches='tight')
plt.savefig(plot_name + '.png', dpi=400, bbox_inches='tight')
print 'Saving figure to: \nevince %s.eps &\neog %s.png &' % (plot_name,
plot_name)
if show:
plt.show()
else:
plt.close('all')
if __name__ == '__main__':
import sys
mol = sasmol.SasMol(0)
if len(sys.argv) < 3:
mol.read_pdb('min_dsDNA60.pdb')
# mol.read_dcd('run3_100k_ngb/monte_carlo/min_dsDNA60.dcd')
dcd_full_name = 'run3_100k_ngb/monte_carlo/min_dsDNA60_sparse.dcd'
else:
mol.read_pdb(sys.argv[1])
dcd_full_name = sys.argv[2]
voxel_set = set([])
delta = 5.0
list_number_new_voxels = []
list_number_occupied_voxels = []
number_occupied_voxels = 0
error, mask = mol.get_subset_mask('name[i]=="CA" or name[i]=="P"')
dcd_file = mol.open_dcd_read(dcd_full_name)
number_of_frames = dcd_file[2]
tic = time.time()
output_file = "number_of_occupied_voxels.txt"
fout = open(output_file, 'w')
fout.write("#frame_number, number_of_occupied_voxels\n")
for nf in xrange(number_of_frames):
mol.read_dcd_step(dcd_file, nf + 1)
error, coors = mol.get_coor_using_mask(0, mask)
assert not error, error
number_new_voxels = count_new_spatial_voxels(
coors[0], voxel_set, delta)
number_occupied_voxels += number_new_voxels
list_number_new_voxels.append(number_new_voxels)
list_number_occupied_voxels.append(number_occupied_voxels)
fout.write("%d %d\n" % (nf, number_occupied_voxels))
fout.close()
toc = time.time() - tic
print "\ntime used: ", toc
fig = plt.figure(figsize=(6, 6))
gs = gridspec.GridSpec(2, 1, left=0.2, right=0.95, wspace=0, hspace=0)
ax = []
ax.append(plt.subplot(gs[0]))
ax.append(plt.subplot(gs[1]))
ax[0].plot(range(len(list_number_new_voxels)), list_number_new_voxels)
ax[0].set_xlabel('Structure')
ax[0].set_ylabel('number of new voxels')
ax[0].set_yscale('log') # lim([0, max(list_number_new_voxels)*1.05])
ax[0].xaxis.set_ticklabels([])
ax[1].plot(
range(
len(list_number_occupied_voxels)),
list_number_occupied_voxels)
ax[1].set_xlabel('Structure')
ax[1].set_ylabel('number of occupied voxels')
ylim = ax[1].get_ylim()
ax[1].set_ylim((ylim[0], ylim[1] * 1.1))
plt.savefig('metric_convergence.eps', dpi=400, bbox_inches='tight')
plt.savefig('metric_convergence.png', dpi=400, bbox_inches='tight')
plt.show()
print '\m/ >.< \m/'
| gpl-3.0 |
Simclass/EDXD_Analysis | bin/data_creation.py | 3 | 3978 | import numpy as np
import matplotlib.pyplot as plt
import time
from pyxe.williams import sigma_xx, sigma_yy, sigma_xy, cart2pol
from pyxe.fitting_functions import strain_transformation, shear_transformation
def plane_strain_s2e(sigma_xx, sigma_yy, sigma_xy, E, v, G=None):
if G is None:
        G = E / (2 * (1 + v))  # standard isotropic shear modulus
e_xx = (1 / E) * (sigma_xx - v*sigma_yy)
e_yy = (1 / E) * (sigma_yy - v*sigma_xx)
e_xy = sigma_xy / G
return e_xx, e_yy, e_xy
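# Sanity check with illustrative values (E = 200 GPa, v = 0.3, stresses in Pa,
# using the isotropic G = E / (2 * (1 + v)) set above):
#     plane_strain_s2e(100e6, 50e6, 25e6, 200e9, 0.3)
#     # -> e_xx ~ 4.25e-04, e_yy ~ 1.00e-04, e_xy ~ 3.25e-04 (G ~ 76.9 GPa)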
class StrainField(object):
def __init__(self, x, y, K, E, v, G=None, state='plane strain'):
self.K = K
self.x = x
self.y = y
self.r, self.theta = cart2pol(x, y)
self.sig_xx = sigma_xx(self.K, self.r, self.theta)
self.sig_yy = sigma_yy(self.K, self.r, self.theta)
self.sig_xy = sigma_xy(self.K, self.r, self.theta)
sigma_comp = self.sig_xx, self.sig_yy, self.sig_xy
stress2strain = plane_strain_s2e if state == 'plane strain' else None
data = stress2strain(*sigma_comp, E, v, G)
self.e_xx, self.e_yy, self.e_xy = data
def extract_strain_map(self, phi=np.pi/2, shear=False):
trans = strain_transformation if not shear else shear_transformation
e = trans(phi, self.e_xx, self.e_yy, self.e_xy)
return e
def plot_strain_map(self, phi=np.pi/2, shear=False):
e = self.extract_strain_map(phi, shear)
plt.contourf(self.x, self.y, e, 21)
plt.show()
def extract_stress_map(self, phi=np.pi/2, shear=False):
trans = strain_transformation if not shear else shear_transformation
sig = trans(phi, self.sig_xx, self.sig_yy, self.sig_xy)
return sig
def plot_stress_map(self, phi=np.pi/2, shear=False):
sig = self.extract_stress_map(phi, shear)
plt.contourf(self.x, self.y, sig, 21)
plt.show()
def extract_strain_array(self, phi):
"""
        Build the strain array evaluated at each azimuthal angle in phi.
        :param phi: array of azimuthal angles (radians)
        :return: strain array of shape (ny, nx, 1, len(phi))
"""
strain = np.nan * np.ones((self.x.shape + (1,) + phi.shape))
for idx, tt in enumerate(phi):
e_xx1 = strain_transformation(tt, self.e_xx, self.e_yy, self.e_xy)
strain[:, :, 0, idx] = e_xx1
return strain
def create_nxs_shell(x, y, phi):
group = None
ss2_x = x
ss2_y = y
ss2_x = None
scan_command = [b'ss2_x', b'ss2_y']
phi = phi
q = 0
I = 0
# create nxs
# h5py.save
# load nxs and fill with data
def add_strain_field(data, K, E, v, G=None, state='plane strain'):
crack_field = StrainField(data.ss2_x, data.ss2_y, K, E, v, G, state)
data.strain = crack_field.extract_strain_array(data.phi)
data.strain_err = np.zeros_like(data.strain)
return crack_field
x = np.linspace(-0.5, 1, 100)
y = np.linspace(-0.75, 0.75, 100)
X, Y = np.meshgrid(x, y)
data = StrainField(X, Y, 20*10**6, 200*10**9, 0.3)
data.create_nxs(np.linspace(0, np.pi, 10))
#sigma_array = np.nan * np.ones((y.size, x.size, 1, n_phi))
#for idx, tt in enumerate(np.linspace(0, np.pi, n_phi)):
# sigma_array[:, :, 0, idx] = strain_transformation(tt, *(sig_xx, sig_yy, sig_xy))
#e_xx, e_yy, e_xy = plane_strain_s2e(sig_xx, sig_yy, sig_xy, 200 * 10 **9, 0.3)
#strain_array = np.nan * np.ones((y.size, x.size, 1, n_phi))
#for idx, tt in enumerate(np.linspace(0, np.pi, n_phi)):
# e_xx1 = strain_transformation(tt, *(e_xx, e_yy, e_xy))
# strain_array[:, :, 0, idx] = e_xx1
# plt.figure()
# e_xx1[e_xx1>0.004]=0.004
# e_xx1[e_xx1 < -0.001] = -0.001
# plt.contourf(X, Y, e_xx1, np.linspace(-0.001, 0.004, 25))
# plt.colorbar()
# plt.contour(X, Y, e_xx1, np.linspace(-0.001, 0.004, 25), colors = 'k', linewidths=0.4, aplha=0.3)
# plt.savefig(r'C:\Users\lbq76018\Documents\Python Scripts\pyxe_fake\%03d.png' % idx)
#plt.show()
#plt.figure()
#c = plt.contourf(X, Y, sig_yy, 25)
#plt.colorbar()
#plt.figure()
#c = plt.contourf(X, Y, e_yy, 25)
#plt.colorbar()
#plt.show()
#print(sigma_array)
| mit |
jwlockhart/concept-networks | examples/draw_tripartite.py | 1 | 3581 | # @author Jeff Lockhart <[email protected]>
# Script for drawing the tripartite network underlying analysis.
# version 1.0
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import sys
#add the parent directory to the current session's path
sys.path.insert(0, '../')
from network_utils import *
#read our cleaned up data
df = pd.read_csv('../data/sgm_stud/merged.tsv', sep='\t')
#The list of codes we're interested in.
code_cols = ['culture_problem',
#'culture_absent',
'culture_solution',
'culture_helpless',
'culture_victim',
'cishet_problem',
'cishet_victim',
'cishet_solution',
#'cishet_absent',
'cishet_helpless',
'sgm_victim',
'sgm_problem',
'sgm_helpless',
#'sgm_absent',
'sgm_solution',
'school_problem',
'school_solution',
#'school_absent',
'school_victim',
'school_helpless',
'community_problem',
'community_solution',
'community_helpless',
#'community_absent',
'community_victim']
#generate unique ID keys for each student and excerpt
def s_id(row):
return row['uni'] + str(row['Participant'])
def e_id(row):
return row['s_id'] + '-' + str(row['Start'])
df['s_id'] = df.apply(s_id, axis=1)
df['e_id'] = df.apply(e_id, axis=1)
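# Illustrative example (row values are hypothetical): a row with uni='OSU',
# Participant=7 and Start=120 gets s_id 'OSU7' and e_id 'OSU7-120'.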
#make a graph
g = nx.Graph()
#add all of our codes as nodes
for c in code_cols:
g.add_node(c, t='code')
#add each excerpt of text as a node. Connect it with relevant
#students and codes.
st = []
ex = []
last = ''
for row in df.iterrows():
#add the student node
g.add_node(row[1]['s_id'], t='student')
#if we haven't seen this student before, save the order we saw them in
if last != row[1]['s_id']:
last = row[1]['s_id']
st.append(last)
#add this excerpt node. Save its order to our list.
g.add_node(row[1]['e_id'], t='excerpt')
ex.append(row[1]['e_id'])
#add the edge joining this student and excerpt.
g.add_edge(row[1]['s_id'], row[1]['e_id'])
#for each code this excerpt has, draw an edge to it
for c in code_cols:
if row[1][c]:
g.add_edge(row[1]['e_id'], c)
#get a dictionary of our code nodes' labels
l = {}
for c in code_cols:
l[c] = c
#fix the positions of each node type in columns
pos = dict()
#space out the student and code nodes to align with excerpt column height
pos.update( (n, (1, i*5.57)) for i, n in enumerate(st) )
pos.update( (n, (2, i)) for i, n in enumerate(ex) )
pos.update( (n, (3, i*90)) for i, n in enumerate(code_cols) )
#make our figure big so we can see
plt.figure(figsize=(20,20))
#draw our nodes
nx.draw_networkx_nodes(g, pos, nodelist=st, node_color='r',
node_shape='^')
nx.draw_networkx_nodes(g, pos, nodelist=ex, node_color='b',
node_shape='o', alpha=0.5)
#draw our edges with low alpha so we can see
nx.draw_networkx_edges(g, pos, alpha=0.2)
#axes look silly
plt.axis('off')
#save the edges and nodes as one image
plt.savefig('../data/tripartite_unlabeled.png')
#save the labels for the codes as a different image
#this lets me edit them in with GIMP so that they're better positioned.
plt.figure(figsize=(20,20))
nx.draw_networkx_labels(g, pos, labels=l, font_size=20)
nx.draw_networkx_edges(g, pos, alpha=0)
plt.axis('off')
plt.savefig('../data/tripartite_labels.png')
| gpl-3.0 |
caperren/Archives | OSU Coursework/ROB 456 - Intelligent Robotics/Homework 4 - A Star Pathfinding/hw4.py | 1 | 7720 | import csv
from matplotlib import pyplot, patches
from math import sqrt
from heapq import *
CSV_PATH = "world.csv"
VAL_TO_COLOR = {
0: "green",
1: "red",
-1: "blue"
}
EDGE_COST = 1
START_POSITION = (0, 0)
END_POSITION = (19, 19)
def import_csv_as_array(csv_path):
csv_file = open(csv_path, "rU") # Open the file
csv_reader = csv.reader(csv_file) # Put it through the csv reader
# Loop through the csv lines and append them to an array
output_array = []
for line in csv_reader:
output_array.append([int(col_val) for col_val in line])
# Delete the csv reader and close the file
del csv_reader
csv_file.close()
# Return our world map array
return output_array
def plot_grid_map(grid_map, fig_save_path=None):
# Make the plot
figure_object, axes_object = pyplot.subplots()
# Plot appropriately colored rectangles for each point on the map
for y, row in enumerate(grid_map):
for x, col in enumerate(row):
axes_object.add_patch(patches.Rectangle((x, y), 1, 1, fill=True, color=VAL_TO_COLOR[col]))
# Plot some x and y dotted lines to make it nicer to view the underlying grid
for y in range(len(grid_map)):
axes_object.plot([0, len(grid_map[0])], [y, y], color="black", alpha=0.75, linestyle=":")
for x in range(len(grid_map[0])):
axes_object.plot([x, x], [0, len(grid_map)], color="black", alpha=0.75, linestyle=":")
# Set the y limit from len(grid_map) to 0 so it matches how the file looks in terms of the map
axes_object.set_ylim([len(grid_map), 0])
axes_object.autoscale(enable=True, tight=True)
# If the optional argument to save to a file is added, output that file
if fig_save_path:
figure_object.savefig(fig_save_path, bbox_inches="tight")
# Show the plot
pyplot.show()
class AStarSolver(object):
# Directions to be used for children
VALID_DIRECTIONS = \
[
[1, 0], # E
[0, 1], # N
[-1, 0], # W
[0, -1], # S
]
def __init__(self, world, start_position, end_position):
# Initialize all the class variables
self.world_map = world
self.world_limit_x = len(self.world_map[0])
self.world_limit_y = len(self.world_map)
self.start_position = start_position
self.end_position = end_position
self.open_set = []
self.closed_set = []
self.g_scores = {}
self.f_scores = {}
self.travel_path = {}
self.final_path = []
self.solution_map = list(self.world_map)
@staticmethod
def heuristic(start_point, end_point):
# Calculate the heuristic from point a to point b using the pythagorean theorem
delta_x = abs(end_point[0] - start_point[0])
delta_y = abs(end_point[1] - start_point[1])
return sqrt(pow(delta_x, 2) + pow(delta_y, 2))
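    # Quick example of the Euclidean heuristic above:
    #     AStarSolver.heuristic((0, 0), (3, 4))  # -> 5.0
    # Since moves are restricted to the four cardinal directions with unit
    # cost, this straight-line distance never overestimates the true cost,
    # which keeps A* admissible.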
def solve_path(self):
# Add the starting node, plus it's initial f_cost
self.g_scores[self.start_position] = 0
self.f_scores[self.start_position] = self.heuristic(self.start_position, self.end_position)
# Put the starting node into the open set as (f_score, position)
# It needs to be in this form for heap sorting by f_score
heappush(self.open_set, (self.f_scores[self.start_position], self.start_position))
while self.open_set:
# Pop off the most recent node in open set with the lowest f_score
current_node = heappop(self.open_set)
# Extract the current position from the node
current_position = current_node[1]
# If we've reached the end, break so we can compute the final path
if current_position == self.end_position:
break
# Now that we've reached this node, add it to the closed set
self.closed_set.append(current_position)
# Loop through the cardinal directions we can move to
for delta_x, delta_y in self.VALID_DIRECTIONS:
# Computer the child position based on the cardinal direction and teh current position
child_position = (current_position[0] + delta_x, current_position[1] + delta_y)
# Compute the child's g_score with an edge cost of 1
child_g_score = self.g_scores[current_position] + EDGE_COST
# Check if location is in the world
valid_x_limit = 0 <= child_position[0] < self.world_limit_x
valid_y_limit = 0 <= child_position[1] < self.world_limit_y
# If it's in the world, make sure the child location is not an obstacle
valid_not_obstacle = None
if valid_x_limit and valid_y_limit:
valid_not_obstacle = self.world_map[child_position[1]][child_position[0]] != 1
# If the child is in a valid location and not an obstacle:
if valid_x_limit and valid_y_limit and valid_not_obstacle:
# Skip to the next child if we've already seen this node and the current path is more costly than
# what we've seen previously
if child_position in self.closed_set and child_g_score >= self.g_scores.get(child_position, 0):
continue
# Get a list of all positions in our open set
open_set_positions = [x[1] for x in self.open_set]
# If the score is better than what we've seen, or if we've never seen this node before, add the node
# to our open set and add this as a potential path
if child_g_score < self.g_scores.get(child_position, 0) or child_position not in open_set_positions:
self.travel_path[child_position] = current_position # Add this jump to the travel path
self.g_scores[child_position] = child_g_score # Sets the new g_score
self.f_scores[child_position] = \
child_g_score + self.heuristic(child_position, self.end_position) # Sets the new f_score
heappush(self.open_set, (self.f_scores[child_position], child_position)) # Add to open set
# Work our way backwards from the end to find the proper path
final_path = [self.end_position] # Add our last hop manually so the loop below can include our start position
current_position = self.end_position # Set the current position to the end
while current_position != self.start_position: # Keep looping until we've reached the beginning of the path
current_position = self.travel_path[current_position] # Update the current to the last path location
final_path.append(current_position) # Append this location to our final array
self.final_path = final_path[::-1] # Now that we've found the path, reverse it so it's in order
# This applies modifications to the world map with the solution so you can see the path when plotting
for x, y in self.final_path:
self.solution_map[y][x] = -1
def get_solution_map(self):
# Gives us the solution map once we've found a solution
return self.solution_map
if __name__ == '__main__':
world_map = import_csv_as_array(CSV_PATH) # Import the map
solver = AStarSolver(world_map, START_POSITION, END_POSITION) # Initialize the solver
solver.solve_path() # Solve the path
solution_map = solver.get_solution_map() # Retrieve the solution map
plot_grid_map(solution_map, "final_path.pdf") # Plot and save the solution
| gpl-3.0 |
thirdwing/mxnet | python/mxnet/model.py | 4 | 39905 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
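# Behaviour sketch for _create_kvstore (derived from the logic above):
#     _create_kvstore(None, 4, arg_params)         # -> (None, False)
#     _create_kvstore('local', 1, arg_params)      # -> (None, False), single device
#     _create_kvstore('local', 2, arg_params)      # -> (local kvstore, True), unless some
#                                                  #    parameter exceeds 16M elements, in
#                                                  #    which case update_on_kvstore is False
#     _create_kvstore('dist_sync', 1, arg_params)  # -> (dist kvstore, True)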
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names,
update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
            # use a better solution later
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
    This function also works for a single device.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get result from evaluation metric. etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
    - This function updates the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
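# Usage sketch (the prefix and epoch are hypothetical; file names follow the
# conventions documented above):
#     save_checkpoint('my_model', 10, net, arg_params, aux_params)
#     # writes 'my_model-symbol.json' and 'my_model-0010.params'
#     net, arg_params, aux_params = load_checkpoint('my_model', 10)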
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
        # check if symbol contains duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batch to run. Go though all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go though all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local', often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optmizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and not '_async' in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
optimizer = self.optimizer
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage(S3, HDFS)
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - ``prefix-symbol.json`` will be loaded for symbol.
        - ``prefix-epoch.params`` will be loaded for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
Number of batches in a epoch. In default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
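# A minimal usage sketch (illustrative only, not part of the original module):
# train a tiny MLP on random data with the FeedForward API defined above, then
# checkpoint and reload it. The network shape, hyperparameters and the
# 'demo_model' prefix are arbitrary assumptions.
if __name__ == '__main__':
    import mxnet as mx
    net = mx.symbol.Variable('data')
    net = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=16)
    net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
    X_demo = np.random.rand(100, 8).astype('float32')
    y_demo = np.random.randint(0, 2, size=(100,))
    model = FeedForward.create(net, X=X_demo, y=y_demo,
                               num_epoch=1, learning_rate=0.1)
    print(model.predict(X_demo).shape)
    model.save('demo_model')   # writes demo_model-symbol.json and demo_model-0001.params
    reloaded = FeedForward.load('demo_model', epoch=1)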
| apache-2.0 |
LaDO-IOUSP/Curious | Python/pizzaplot.py | 1 | 1687 | # -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
def pizzaplot(center, radius, angle=0,nb=2, ax=None, colors=[],**kwargs):
    ''' Plots a circle divided into the given number of parts, each filled with a different color (multicolor scatter).
===========================================================================
Input :
-> center: center(x,y) of the scatter
-> radius: radius of the circle (float)
-> angle: angle of rotation of the color division (degrees [0...360])
    -> nb: number of parts/colors in the same plot (int)
-> colors: colors to fill the scatter (list)
===========================================================================
Output:
-> Returns Matplotlib Patch & plots the scatter
===========================================================================
Python version by:
Hélio Almeida ([email protected])
Dante Campagnoli Napolitano ([email protected])
@ LaDO-IOUSP in 11/01/2017
'''
w= []
if len(colors)!=nb:
raise ValueError('Number of colors and parts of scatter must be the same')
if ax is None:
ax = plt.gca()
    # Wedge i spans [theta_i, theta_(i+1)] with theta_i = angle + i/nb*360,
    # wrapping around for the last wedge.
    thetas = [angle + i / float(nb) * 360. for i in np.arange(1, nb + 1)]
    n = len(thetas)
    for i in range(n):
        wedge = Wedge(center, radius, thetas[i], thetas[(i + 1) % n],
                      fc=colors[i], **kwargs)
        ax.add_artist(wedge)
        w.append(wedge)
return w
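if __name__ == '__main__':
    # A minimal usage sketch (illustrative only): draw a two-color and a
    # four-color marker on one set of axes. The positions, radii and colors
    # below are arbitrary assumptions.
    fig, ax = plt.subplots()
    pizzaplot((0.3, 0.5), 0.1, angle=0, nb=2, ax=ax, colors=['red', 'blue'])
    pizzaplot((0.7, 0.5), 0.1, angle=45, nb=4, ax=ax,
              colors=['red', 'blue', 'green', 'orange'])
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_aspect('equal')
    plt.show()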
| mit |
belteshassar/cartopy | lib/cartopy/tests/test_polygon.py | 3 | 17387 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import unittest
import numpy as np
import shapely.geometry as sgeom
import shapely.wkt
import cartopy.crs as ccrs
class TestBoundary(unittest.TestCase):
def test_no_polygon_boundary_reversal(self):
# Check that polygons preserve their clockwise or counter-clockwise
# ordering when they are attached to the boundary.
# Failure to do so will result in invalid polygons (their boundaries
# cross-over).
polygon = sgeom.Polygon([(-10, 30), (10, 60), (10, 50)])
projection = ccrs.Robinson(170.5)
multi_polygon = projection.project_geometry(polygon)
for polygon in multi_polygon:
self.assertTrue(polygon.is_valid)
def test_polygon_boundary_attachment(self):
# Check the polygon is attached to the boundary even when no
# intermediate point for one of the crossing segments would normally
# exist.
polygon = sgeom.Polygon([(-10, 30), (10, 60), (10, 50)])
projection = ccrs.Robinson(170.6)
# This will raise an exception if the polygon/boundary intersection
# fails.
multi_polygon = projection.project_geometry(polygon)
def test_out_of_bounds(self):
# Check that a polygon that is completely out of the map boundary
# doesn't produce an empty result.
projection = ccrs.TransverseMercator(central_longitude=0)
polys = [
# All valid
([(86, -1), (86, 1), (88, 1), (88, -1)], 1),
# One out of backwards projection range
([(86, -1), (86, 1), (130, 1), (88, -1)], 1),
# An out of backwards projection range segment
([(86, -1), (86, 1), (130, 1), (130, -1)], 1),
# All out of backwards projection range
([(120, -1), (120, 1), (130, 1), (130, -1)], 0),
]
        # Check that each polygon yields the expected number of projected polygons.
for coords, expected_polys in polys:
polygon = sgeom.Polygon(coords)
multi_polygon = projection.project_geometry(polygon)
self.assertEqual(len(multi_polygon), expected_polys)
class TestMisc(unittest.TestCase):
def test_misc(self):
projection = ccrs.TransverseMercator(central_longitude=-90)
polygon = sgeom.Polygon([(-10, 30), (10, 60), (10, 50)])
multi_polygon = projection.project_geometry(polygon)
def test_small(self):
projection = ccrs.Mercator()
polygon = sgeom.Polygon([
(-179.7933201090486079, -16.0208822567412312),
(-180.0000000000000000, -16.0671326636424396),
(-179.9173693847652942, -16.5017831356493616),
])
multi_polygon = projection.project_geometry(polygon)
self.assertEqual(len(multi_polygon), 1)
self.assertEqual(len(multi_polygon[0].exterior.coords), 4)
def test_former_infloop_case(self):
# test a polygon which used to get stuck in an infinite loop
# see https://github.com/SciTools/cartopy/issues/60
coords = [(260.625, 68.90383337092122), (360.0, 79.8556091996901),
(360.0, 77.76848175458498), (0.0, 88.79068047337279),
(210.0, 90.0), (135.0, 88.79068047337279),
(260.625, 68.90383337092122)]
geom = sgeom.Polygon(coords)
target_projection = ccrs.PlateCarree()
source_crs = ccrs.Geodetic()
multi_polygon = target_projection.project_geometry(geom, source_crs)
# check the result is non-empty
self.assertFalse(multi_polygon.is_empty)
def test_project_previous_infinite_loop(self):
mstring1 = shapely.wkt.loads(
'MULTILINESTRING ('
'(-179.9999990464349651 -80.2000000000000171, '
'-179.5000000001111005 -80.2000000000000171, '
'-179.5000000001111005 -79.9000000000000199, '
'-179.9999995232739138 -79.9499999523163041, '
'-179.8000000001110550 -80.0000000000000000, '
'-179.8000000001110550 -80.0999999999999943, '
'-179.9999999047436177 -80.0999999999999943), '
'(179.9999995231628702 -79.9499999523163041, '
'179.5000000000000000 -79.9000000000000199, '
'179.5000000000000000 -80.0000000000000000, '
'179.9999995231628702 -80.0499999523162842, '
'179.5000000000000000 -80.0999999999999943, '
'179.5000000000000000 -80.2000000000000171, '
'179.9999990463256836 -80.2000000000000171))')
mstring2 = shapely.wkt.loads(
'MULTILINESTRING ('
'(179.9999996185302678 -79.9999999904632659, '
'179.5999999999999943 -79.9899999999999949, '
'179.5999999999999943 -79.9399999999999977, '
'179.9999996185302678 -79.9599999809265114), '
'(-179.9999999047436177 -79.9600000000000080, '
'-179.9000000001110777 -79.9600000000000080, '
'-179.9000000001110777 -80.0000000000000000, '
'-179.9999999047436177 -80.0000000000000000))')
multi_line_strings = [mstring1, mstring2]
src = ccrs.PlateCarree()
src._attach_lines_to_boundary(multi_line_strings, True)
def test_3pt_poly(self):
projection = ccrs.OSGB()
polygon = sgeom.Polygon([(-1000, -1000),
(-1000, 200000),
(200000, -1000)])
multi_polygon = projection.project_geometry(polygon, ccrs.OSGB())
self.assertEqual(len(multi_polygon), 1)
self.assertEqual(len(multi_polygon[0].exterior.coords), 4)
def test_self_intersecting_1(self):
# Geometry comes from a matplotlib contourf (see #537)
wkt = ('POLYGON ((366.22000122 -9.71489298, '
'366.73212393 -9.679999349999999, '
'366.77412634 -8.767753000000001, '
'366.17762962 -9.679999349999999, '
'366.22000122 -9.71489298), '
'(366.22000122 -9.692636309999999, '
'366.32998657 -9.603356099999999, '
'366.74765799 -9.019999500000001, '
'366.5094086 -9.63175386, '
'366.22000122 -9.692636309999999))')
geom = shapely.wkt.loads(wkt)
source, target = ccrs.RotatedPole(198.0, 39.25), ccrs.EuroPP()
projected = target.project_geometry(geom, source)
# Before handling self intersecting interiors, the area would be
# approximately 13262233761329.
area = projected.area
self.assertTrue(2.2e9 < area < 2.3e9,
msg='Got area {}, expecting ~2.2e9'.format(area))
def test_self_intersecting_2(self):
# Geometry comes from a matplotlib contourf (see #509)
wkt = ('POLYGON ((343 20, 345 23, 342 25, 343 22, '
'340 25, 341 25, 340 25, 343 20), (343 21, '
'343 22, 344 23, 343 21))')
geom = shapely.wkt.loads(wkt)
source = target = ccrs.RotatedPole(193.0, 41.0)
projected = target.project_geometry(geom, source)
# Before handling self intersecting interiors, the area would be
# approximately 64808.
self.assertTrue(7.9 < projected.area < 8.1)
def test_tiny_point_between_boundary_points(self):
# Geometry comes from #259.
target = ccrs.Orthographic(0, -75)
source = ccrs.PlateCarree()
wkt = 'POLYGON ((132 -40, 133 -6, 125.3 1, 115 -6, 132 -40))'
geom = shapely.wkt.loads(wkt)
target = ccrs.Orthographic(central_latitude=90., central_longitude=0)
source = ccrs.PlateCarree()
projected = target.project_geometry(geom, source)
area = projected.area
# Before fixing, this geometry used to fill the whole disk. Approx
# 1.2e14.
self.assertTrue(81330 < area < 81340,
msg='Got area {}, expecting ~81336'.format(area))
class TestQuality(unittest.TestCase):
def setUp(self):
projection = ccrs.RotatedPole(pole_longitude=177.5,
pole_latitude=37.5)
polygon = sgeom.Polygon([
(177.5, -57.38460319),
(180.0, -57.445077),
(175.0, -57.19913331),
])
self.multi_polygon = projection.project_geometry(polygon)
# from cartopy.tests.mpl import show
# show(projection, self.multi_polygon)
def test_split(self):
# Start simple ... there should be two projected polygons.
self.assertEqual(len(self.multi_polygon), 2)
def test_repeats(self):
# Make sure we don't have repeated points at the boundary, because
# they mess up the linear extrapolation to the boundary.
# Make sure there aren't any repeated points.
xy = np.array(self.multi_polygon[0].exterior.coords)
same = (xy[1:] == xy[:-1]).all(axis=1)
self.assertFalse(any(same), 'Repeated points in projected geometry.')
def test_symmetry(self):
# Make sure the number of points added on the way towards the
# boundary is similar to the number of points added on the way away
# from the boundary.
# Identify all the contiguous sets of non-boundary points.
xy = np.array(self.multi_polygon[0].exterior.coords)
boundary = np.logical_or(xy[:, 1] == 90, xy[:, 1] == -90)
regions = (boundary[1:] != boundary[:-1]).cumsum()
regions = np.insert(regions, 0, 0)
# For each region, check if the number of increasing steps is roughly
# equal to the number of decreasing steps.
for i in range(boundary[0], regions.max(), 2):
indices = np.where(regions == i)
x = xy[indices, 0]
delta = np.diff(x)
num_incr = np.count_nonzero(delta > 0)
num_decr = np.count_nonzero(delta < 0)
self.assertLess(abs(num_incr - num_decr), 3,
'Too much asymmetry.')
class PolygonTests(unittest.TestCase):
def _assert_bounds(self, bounds, x1, y1, x2, y2, delta=1):
self.assertAlmostEqual(bounds[0], x1, delta=delta)
self.assertAlmostEqual(bounds[1], y1, delta=delta)
self.assertAlmostEqual(bounds[2], x2, delta=delta)
self.assertAlmostEqual(bounds[3], y2, delta=delta)
class TestWrap(PolygonTests):
# Test that Plate Carree projection "does the right thing"(tm) with
    # source data that extends outside the [-180, 180] range.
def test_plate_carree_no_wrap(self):
proj = ccrs.PlateCarree()
poly = sgeom.box(0, 0, 10, 10)
multi_polygon = proj.project_geometry(poly, proj)
# Check the structure
self.assertEqual(len(multi_polygon), 1)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, 0, 0, 10, 10)
def test_plate_carree_partial_wrap(self):
proj = ccrs.PlateCarree()
poly = sgeom.box(170, 0, 190, 10)
multi_polygon = proj.project_geometry(poly, proj)
# Check the structure
self.assertEqual(len(multi_polygon), 2)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, 170, 0, 180, 10)
polygon = multi_polygon[1]
self._assert_bounds(polygon.bounds, -180, 0, -170, 10)
def test_plate_carree_wrap(self):
proj = ccrs.PlateCarree()
poly = sgeom.box(200, 0, 220, 10)
multi_polygon = proj.project_geometry(poly, proj)
# Check the structure
self.assertEqual(len(multi_polygon), 1)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, -160, 0, -140, 10)
def ring(minx, miny, maxx, maxy, ccw):
box = sgeom.box(minx, miny, maxx, maxy, ccw)
return np.array(box.exterior.coords)
class TestHoles(PolygonTests):
def test_simple(self):
proj = ccrs.PlateCarree()
poly = sgeom.Polygon(ring(-40, -40, 40, 40, True),
[ring(-20, -20, 20, 20, False)])
multi_polygon = proj.project_geometry(poly)
# Check the structure
self.assertEqual(len(multi_polygon), 1)
self.assertEqual(len(multi_polygon[0].interiors), 1)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, -40, -47, 40, 47)
self._assert_bounds(polygon.interiors[0].bounds, -20, -21, 20, 21)
def test_wrapped_poly_simple_hole(self):
proj = ccrs.PlateCarree(-150)
poly = sgeom.Polygon(ring(-40, -40, 40, 40, True),
[ring(-20, -20, 20, 20, False)])
multi_polygon = proj.project_geometry(poly)
# Check the structure
self.assertEqual(len(multi_polygon), 2)
self.assertEqual(len(multi_polygon[0].interiors), 1)
self.assertEqual(len(multi_polygon[1].interiors), 0)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, 110, -47, 180, 47)
self._assert_bounds(polygon.interiors[0].bounds, 130, -21, 170, 21)
polygon = multi_polygon[1]
self._assert_bounds(polygon.bounds, -180, -43, -170, 43)
def test_wrapped_poly_wrapped_hole(self):
proj = ccrs.PlateCarree(-180)
poly = sgeom.Polygon(ring(-40, -40, 40, 40, True),
[ring(-20, -20, 20, 20, False)])
multi_polygon = proj.project_geometry(poly)
# Check the structure
self.assertEqual(len(multi_polygon), 2)
self.assertEqual(len(multi_polygon[0].interiors), 0)
self.assertEqual(len(multi_polygon[1].interiors), 0)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, 140, -47, 180, 47)
polygon = multi_polygon[1]
self._assert_bounds(polygon.bounds, -180, -47, -140, 47)
def test_inverted_poly_simple_hole(self):
proj = ccrs.NorthPolarStereo()
poly = sgeom.Polygon([(0, 0), (-90, 0), (-180, 0), (-270, 0)],
[[(0, -30), (90, -30), (180, -30), (270, -30)]])
multi_polygon = proj.project_geometry(poly)
# Check the structure
self.assertEqual(len(multi_polygon), 1)
self.assertEqual(len(multi_polygon[0].interiors), 1)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, -2.4e7, -2.4e7, 2.4e7, 2.4e7, 1e6)
self._assert_bounds(polygon.interiors[0].bounds,
- 1.2e7, -1.2e7, 1.2e7, 1.2e7, 1e6)
def test_inverted_poly_clipped_hole(self):
proj = ccrs.NorthPolarStereo()
poly = sgeom.Polygon([(0, 0), (-90, 0), (-180, 0), (-270, 0)],
[[(-135, -60), (-45, -60),
(45, -60), (135, -60)]])
multi_polygon = proj.project_geometry(poly)
# Check the structure
self.assertEqual(len(multi_polygon), 1)
self.assertEqual(len(multi_polygon[0].interiors), 1)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, -5.0e7, -5.0e7, 5.0e7, 5.0e7, 1e6)
self._assert_bounds(polygon.interiors[0].bounds,
- 1.2e7, -1.2e7, 1.2e7, 1.2e7, 1e6)
self.assertAlmostEqual(polygon.area, 7.30e15, delta=1e13)
def test_inverted_poly_removed_hole(self):
proj = ccrs.NorthPolarStereo(globe=ccrs.Globe(ellipse='WGS84'))
poly = sgeom.Polygon([(0, 0), (-90, 0), (-180, 0), (-270, 0)],
[[(-135, -75), (-45, -75),
(45, -75), (135, -75)]])
multi_polygon = proj.project_geometry(poly)
# Check the structure
self.assertEqual(len(multi_polygon), 1)
self.assertEqual(len(multi_polygon[0].interiors), 1)
# Check the rough shape
polygon = multi_polygon[0]
self._assert_bounds(polygon.bounds, -5.0e7, -5.0e7, 5.0e7, 5.0e7, 1e6)
self._assert_bounds(polygon.interiors[0].bounds,
- 1.2e7, -1.2e7, 1.2e7, 1.2e7, 1e6)
self.assertAlmostEqual(polygon.area, 7.34e15, delta=1e13)
def test_multiple_interiors(self):
exterior = ring(0, 0, 12, 12, True)
interiors = [ring(1, 1, 2, 2, False), ring(1, 8, 2, 9, False)]
poly = sgeom.Polygon(exterior, interiors)
target = ccrs.PlateCarree()
source = ccrs.Geodetic()
assert len(list(target.project_geometry(poly, source))) == 1
if __name__ == '__main__':
unittest.main()
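# A minimal standalone sketch of the API exercised by the tests above
# (illustrative only; assumes cartopy and shapely are importable):
#
#     import shapely.geometry as sgeom
#     import cartopy.crs as ccrs
#     poly = sgeom.box(-10, 30, 10, 60)   # lon/lat rectangle
#     projected = ccrs.Robinson().project_geometry(poly, ccrs.PlateCarree())
#     print(projected.is_valid, projected.area)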
| gpl-3.0 |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/boxplot_demo.py | 2 | 1288 | import matplotlib.pyplot as plt
import numpy as np
# fake up some data
spread = np.random.rand(50) * 100
center = np.ones(25) * 50
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
data = np.concatenate((spread, center, flier_high, flier_low), 0)
# basic plot
plt.boxplot(data)
# notched plot
plt.figure()
plt.boxplot(data, 1)
# change outlier point symbols
plt.figure()
plt.boxplot(data, 0, 'gD')
# don't show outlier points
plt.figure()
plt.boxplot(data, 0, '')
# horizontal boxes
plt.figure()
plt.boxplot(data, 0, 'rs', 0)
# change whisker length
plt.figure()
plt.boxplot(data, 0, 'rs', 0, 0.75)
# fake up some more data
spread = np.random.rand(50) * 100
center = np.ones(25) * 40
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
d2 = np.concatenate((spread, center, flier_high, flier_low), 0)
data.shape = (-1, 1)
d2.shape = (-1, 1)
# data = concatenate( (data, d2), 1 )
# Making a 2-D array only works if all the columns are the
# same length. If they are not, then use a list instead.
# This is actually more efficient because boxplot converts
# a 2-D array into a list of vectors internally anyway.
data = [data, d2, d2[::2, 0]]
# multiple box plots on one figure
plt.figure()
plt.boxplot(data)
plt.show()
| mit |
RayMick/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
larsmans/seqlearn | seqlearn/datasets.py | 4 | 3045 | # Copyright 2013 Lars Buitinck
from contextlib import closing
from itertools import chain, groupby
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import six
def load_conll(f, features, n_features=(2 ** 16), split=False):
"""Load CoNLL file, extract features on the tokens and vectorize them.
    The CoNLL file format is a line-oriented text format that describes
sequences in a space-separated format, separating the sequences with
blank lines. Typically, the last space-separated part is a label.
    Since the space-separated parts are usually tokens (and maybe things like
part-of-speech tags) rather than feature vectors, a function must be
supplied that does the actual feature extraction. This function has access
to the entire sequence, so that it can extract context features.
A ``sklearn.feature_extraction.FeatureHasher`` (the "hashing trick")
is used to map symbolic input feature names to columns, so this function
    does not remember the actual input feature names.
Parameters
----------
f : {string, file-like}
Input file.
features : callable
Feature extraction function. Must take a list of tokens l that
represent a single sequence and an index i into this list, and must
return an iterator over strings that represent the features of l[i].
n_features : integer, optional
Number of columns in the output.
split : boolean, default=False
Whether to split lines on whitespace beyond what is needed to parse
out the labels. This is useful for CoNLL files that have extra columns
containing information like part of speech tags.
Returns
-------
X : scipy.sparse matrix, shape (n_samples, n_features)
Samples (feature vectors), as a single sparse matrix.
y : np.ndarray, dtype np.string, shape n_samples
Per-sample labels.
lengths : np.ndarray, dtype np.int32, shape n_sequences
Lengths of sequences within (X, y). The sum of these is equal to
n_samples.
"""
fh = FeatureHasher(n_features=n_features, input_type="string")
labels = []
lengths = []
with _open(f) as f:
raw_X = _conll_sequences(f, features, labels, lengths, split)
X = fh.transform(raw_X)
return X, np.asarray(labels), np.asarray(lengths, dtype=np.int32)
def _conll_sequences(f, features, labels, lengths, split):
# Divide input into blocks of empty and non-empty lines.
lines = (str.strip(line) for line in f)
groups = (grp for nonempty, grp in groupby(lines, bool) if nonempty)
for group in groups:
group = list(group)
obs, lbl = zip(*(ln.rsplit(None, 1) for ln in group))
if split:
obs = [x.split() for x in obs]
labels.extend(lbl)
lengths.append(len(lbl))
for i in six.moves.xrange(len(obs)):
yield features(obs, i)
def _open(f):
return closing(open(f) if isinstance(f, six.string_types) else f)
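if __name__ == '__main__':
    # A minimal usage sketch (illustrative only, not shipped with the module):
    # a toy feature extractor and a tiny in-memory CoNLL-style snippet. The
    # feature names and the tokens/labels below are arbitrary assumptions.
    from io import StringIO   # assumes Python 3 here
    def word_features(sequence, i):
        token = sequence[i]
        yield "word=" + token.lower()
        yield "isupper=%s" % token[0].isupper()
        if i > 0:
            yield "prev=" + sequence[i - 1].lower()
    sample = u"Pierre B-PER\nVinken I-PER\n\nwill O\njoin O\n"
    X, y, lengths = load_conll(StringIO(sample), word_features)
    print(X.shape, list(y), list(lengths))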
| mit |
smenon8/AnimalWildlifeEstimator | script/RegressionCapsuleClass.py | 1 | 1640 | # python-3
# Regression Capsule Class
# In the same lines as ClassifierCapsuleClass
from sklearn.metrics import mean_absolute_error, mean_squared_error
from BaseCapsuleClass import BaseCapsule
from collections import OrderedDict
import pandas as pd
class RegressionCapsule(BaseCapsule):
def __init__(self,clfObj,methodName,splitPercent,train_x,train_y,test_x,test_y):
BaseCapsule.__init__(self,clfObj,methodName,splitPercent,train_x,train_y,test_x,test_y)
self.residues = None
def evalClassifierPerf(self):
self.abserr = mean_absolute_error(self.test_y,self.preds)
self.sqerr = mean_squared_error(self.test_y,self.preds)
if not self.test_y.empty:
self.residues = [list(self.test_y)[i] - self.preds[i] for i in range(len(self.preds))]
def removeOutliers(self):
idxs = [i for i in range(len(self.preds)) if self.preds[i] > 100 or self.preds[i] < 0]
idxList = list(self.test_x.index)
predDict = OrderedDict()
for i in range(len(idxList)):
predDict[idxList[i]] = self.preds[i]
df = pd.DataFrame(predDict,index=['Predictions']).transpose()
self.test_x.drop(self.test_x.index[idxs], inplace=True)
        if self.test_y is not None:
if not self.test_y.empty:
self.test_y.drop(self.test_y.index[idxs], inplace=True)
df.drop(df.index[idxs], inplace=True)
self.preds = list(df['Predictions'])
print("Number of outliers identified: %d" %len(idxs))
print(len(self.test_x),len(self.preds))
def runRgr(self,computeMetrics=True,removeOutliers=True):
BaseCapsule.run(self)
if removeOutliers:
self.removeOutliers()
if computeMetrics:
return self.evalClassifierPerf()
else:
            return 0
| bsd-3-clause |
zingale/hydro_examples | compressible/riemann-slow-shock.py | 1 | 1642 | # plot the exact solution of a compressible Riemann problem (slow shock)
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import riemann
import matplotlib as mpl
# Use LaTeX for rendering
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 'large'
mpl.rcParams['figure.titlesize'] = 'medium'
if __name__ == "__main__":
# setup the problem -- slow shock
# stationary shock
#left = riemann.State(p=100.0, u=-1.9336, rho=5.6698)
#right = riemann.State(p=1.0, u=-10.9636, rho=1.0)
# slow shock
left = riemann.State(p=100.0, u=-1.4701, rho=5.6698)
right = riemann.State(p=1.0, u=-10.5, rho=1.0)
rp = riemann.RiemannProblem(left, right)
rp.find_star_state()
x, rho, u, p = rp.sample_solution(1.0, 128)
plt.subplot(311)
plt.plot(x, rho)
plt.ylabel(r"$\rho$")
plt.xlim(0, 1)
plt.tick_params(axis="x", labelbottom="off")
plt.subplot(312)
plt.plot(x, u)
plt.ylabel(r"$u$")
plt.xlim(0, 1)
plt.tick_params(axis="x", labelbottom="off")
plt.subplot(313)
plt.plot(x, p)
plt.ylabel(r"$p$")
plt.xlabel(r"$x$")
plt.xlim(0, 1)
f = plt.gcf()
f.set_size_inches(6.0, 9.0)
plt.tight_layout()
plt.savefig("riemann-slowshock.pdf")
gamma = rp.gamma
e = p/rho/(gamma - 1.0)
# output the solution
with open("slowshock-exact.out", "w") as f:
for n in range(len(x)):
f.write("{:20.10g} {:20.10g} {:20.10g} {:20.10g} {:20.10g}\n".format(x[n], rho[n], u[n], p[n], e[n]))
| bsd-3-clause |
cybernet14/scikit-learn | sklearn/tree/tree.py | 59 | 34839 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"presort-best": _splitter.PresortBestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, in
            # contrast to [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
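# A minimal usage sketch of the feature_importances_ property defined above
# (illustrative only; the exact values depend on the fitted data):
#
#     from sklearn.datasets import load_iris
#     from sklearn.tree import DecisionTreeClassifier
#     iris = load_iris()
#     clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
#     print(clf.feature_importances_)   # normalized, sums to 1.0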
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
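# Editor's illustrative sketch (not part of the original module): the
# ``class_weight="balanced"`` heuristic documented in the class docstring
# computes per-class weights as n_samples / (n_classes * np.bincount(y)).
# Shown commented out so that nothing executes at import time:
#
#     import numpy as np
#     y = np.array([0, 0, 0, 1])
#     n_classes = len(np.unique(y))
#     weights = len(y) / (n_classes * np.bincount(y).astype(float))
#     # -> array([ 0.6667,  2.    ]); the rarer class gets the larger weight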
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
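# Editor's illustrative sketch (not part of the original module): ``predict``
# is the argmax of ``predict_proba`` mapped through ``classes_``, and each
# probability is the class frequency in the leaf a sample ends up in. Kept as
# comments to avoid importing sklearn.tree from within this module; run it as
# a standalone snippet instead:
#
#     import numpy as np
#     from sklearn.datasets import load_iris
#     from sklearn.tree import DecisionTreeClassifier
#
#     iris = load_iris()
#     clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
#     proba = clf.predict_proba(iris.data)
#     labels = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
#     assert np.array_equal(labels, clf.predict(iris.data))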
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
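# Editor's illustrative note (not part of the original module): the docstring
# above states that the "mse" criterion is equivalent to variance reduction;
# at a single node the mean-squared-error impurity is just the variance of the
# targets, e.g.:
#
#     import numpy as np
#     y = np.array([1.0, 3.0, 5.0, 7.0])
#     mse = np.mean((y - y.mean()) ** 2)
#     assert np.isclose(mse, np.var(y))   # both equal 5.0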
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
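# Editor's illustrative sketch (not part of the original module): as the
# docstrings above warn, ExtraTree* estimators are meant to be used inside an
# ensemble. A minimal bagging example (kept as comments; the dataset choice
# mirrors the DecisionTreeRegressor docstring example):
#
#     from sklearn.datasets import load_boston
#     from sklearn.ensemble import BaggingRegressor
#     from sklearn.tree import ExtraTreeRegressor
#
#     boston = load_boston()
#     ensemble = BaggingRegressor(ExtraTreeRegressor(random_state=0),
#                                 n_estimators=10, random_state=0)
#     ensemble.fit(boston.data, boston.target)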
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 105 | 26588 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a dict-typed class_weight is
# provided for a multiclass problem. However, it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
| bsd-3-clause |
chrisburr/scikit-learn | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
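# Editor's illustrative addition (commented out so the example's output stays
# unchanged): the three selected shrinkage coefficients can be compared
# directly; the exact values depend on the random data generated above.
# print("CV best shrinkage: %.3f" % cv.best_estimator_.shrinkage)
# print("Ledoit-Wolf shrinkage: %.3f" % lw.shrinkage_)
# print("OAS shrinkage: %.3f" % oa.shrinkage_)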
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
wogsland/QSTK | build/lib.linux-x86_64-2.7/Bin/converter.py | 5 | 2926 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: [email protected]
@summary: Contains tutorial for backtester and report.
'''
#
# fundsToPNG.py
#
# Short script which produces a graph of funds
# over time from a pickle file.
#
# Drew Bratcher
#
from pylab import *
from QSTK.qstkutil import DataAccess as da
from QSTK.qstkutil import tsutil as tsu
# from quicksim import quickSim
from copy import deepcopy
import math
from pandas import *
import matplotlib.pyplot as plt
import cPickle
def fundsToPNG(funds,output_file):
plt.clf()
if(type(funds)==type(list())):
for i in range(0,len(funds)):
plt.plot(funds[i].index,funds[i].values)
else:
plt.plot(funds.index,funds.values)
plt.ylabel('Fund Value')
plt.xlabel('Date')
plt.gcf().autofmt_xdate(rotation=45)
plt.draw()
savefig(output_file, format='png')
def fundsAnalysisToPNG(funds,output_file):
plt.clf()
if(type(funds)!=type(list())):
print 'fundsmatrix only contains one timeseries, not able to analyze.'
#convert to daily returns
count=list()
dates=list()
sum=list()
for i in range(0,len(funds)):
ret=tsu.daily(funds[i].values)
for j in range(0, len(ret)):
if (funds[i].index[j] in dates):
sum[dates.index(funds[i].index[j])]+=ret[j]
count[dates.index(funds[i].index[j])]+=1
else:
dates.append(funds[i].index[j])
count.append(1)
sum.append(ret[j])
#compute average
tot_ret=deepcopy(sum)
for i in range(0,len(sum)):
tot_ret[i]=sum[i]/count[i]
#compute std
std=zeros(len(sum))
for i in range(0,len(funds)):
temp=tsu.daily(funds[i].values)
for j in range(0,len(temp)):
std[dates.index(funds[i].index[j])]=0
std[dates.index(funds[i].index[j])]+=math.pow(temp[j]-tot_ret[dates.index(funds[i].index[j])],2)
for i in range(1, len(std)):
# std[i]=math.sqrt(std[i]/count[i])+std[i-1]
std[i]=math.sqrt(std[i]/count[i])
#compute total returns
lower=deepcopy(tot_ret)
upper=deepcopy(tot_ret)
tot_ret[0]=funds[0].values[0]
lower[0]=funds[0].values[0]
upper[0]=lower[0]
# for i in range(1,len(tot_ret)):
# tot_ret[i]=tot_ret[i-1]+(tot_ret[i])*tot_ret[i-1]
# lower[i]=tot_ret[i-1]-(std[i])*tot_ret[i-1]
# upper[i]=tot_ret[i-1]+(std[i])*tot_ret[i-1]
for i in range(1,len(tot_ret)):
lower[i]=(tot_ret[i]-std[i]+1)*lower[i-1]
upper[i]=(tot_ret[i]+std[i]+1)*upper[i-1]
tot_ret[i]=(tot_ret[i]+1)*tot_ret[i-1]
plt.clf()
plt.plot(dates,tot_ret)
plt.plot(dates,lower)
plt.plot(dates,upper)
plt.legend(('Tot_Ret','Lower','Upper'),loc='upper left')
plt.ylabel('Fund Total Return')
plt.ylim(ymin=0,ymax=2*tot_ret[0])
plt.draw()
savefig(output_file, format='png')
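# Editor's illustrative usage sketch (not part of the original script; the
# pickle file name is hypothetical). cPickle is imported above precisely for
# loading fund time series dumped by the backtester:
#
# funds_list = cPickle.load(open('funds.pkl', 'rb')) # list of fund Series
# fundsToPNG(funds_list, 'funds.png')
# fundsAnalysisToPNG(funds_list, 'funds_analysis.png')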
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
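# Editor's illustrative note (not part of the original test-suite): the naive
# formulation overflows for large-magnitude inputs, which is exactly what the
# checks above guard against:
#
#     x = -1000.
#     1. / (1. + np.exp(-x))   # RuntimeWarning: overflow in exp; rounds to 0.0
#     expit(x)                 # 0.0, computed without the intermediate overflow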
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
toobaz/pandas | pandas/tests/arithmetic/test_timedelta64.py | 2 | 76159 | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
NaT,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
timedelta_range,
)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
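# Editor's illustrative note (not part of the original test module): given the
# priority order above (DataFrame > Series > Index > other), for example
# get_upcast_box(pd.Index, pd.Series([1])) returns pd.Series, while
# get_upcast_box(pd.Index, np.array([1])) falls through and returns pd.Index.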
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range("2H", periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(["1 day", "2 days"])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, "a"), ("a", tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex(
[pd.Timedelta("1 days"), pd.NaT, pd.Timedelta("3 days")]
)
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta("3 days")])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(
[
"1 day",
pd.NaT,
"1 day 00:00:01",
pd.NaT,
"1 day 00:00:01",
"5 day 00:00:03",
]
)
tdidx2 = pd.TimedeltaIndex(
["2 day", "2 day", pd.NaT, pd.NaT, "1 day 00:00:02", "5 days 00:00:03"]
)
tdarr = np.array(
[
np.timedelta64(2, "D"),
np.timedelta64(2, "D"),
np.timedelta64("nat"),
np.timedelta64("nat"),
np.timedelta64(1, "D") + np.timedelta64(2, "s"),
np.timedelta64(5, "D") + np.timedelta64(3, "s"),
]
)
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range("1 days", periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["4H", "8H", "12H", "16H", "20H"], freq="4H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "4H"
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["1H", "2H", "3H", "4H", "5H"], freq="H", name="x")
tm.assert_index_equal(result, exp)
assert result.freq == "H"
idx = TimedeltaIndex(["2H", "4H", "6H", "8H", "10H"], freq="2H", name="x")
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(
["-2H", "-4H", "-6H", "-8H", "-10H"], freq="-2H", name="x"
)
tm.assert_index_equal(result, exp)
assert result.freq == "-2H"
idx = TimedeltaIndex(["-2H", "-1H", "0H", "1H", "2H"], freq="H", name="x")
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(["2H", "1H", "0H", "1H", "2H"], freq=None, name="x")
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (
r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'"
)
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(["0 days", pd.NaT, "1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(["0 days", pd.NaT, "-1 days"], name="foo")
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], name="bar")
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(["20121231", pd.NaT, "20121230"], name="foo")
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range("20130101", periods=3)
ts = Timestamp("20130101")
dt = ts.to_pydatetime()
dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
ts_tz2 = Timestamp("20130101").tz_localize("CET")
dt_tz = ts_tz.to_pydatetime()
td = Timedelta("1 days")
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta("0 days")
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta("0 days")
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta("0 days")
_check(result, expected)
# tz mismatches
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = "Timestamp subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta("0 days")
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(["20121231", "20130101", "20130102"], tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(["0 days", pd.NaT, "0 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(["2 days", pd.NaT, "4 days"], name="foo")
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(["20121231", pd.NaT, "20130101"])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
dti = pd.date_range("20130101", periods=3, name="bar")
td = Timedelta("1 days")
dt = Timestamp("20130101")
result = tdi + dt
expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(["20130102", pd.NaT, "20130103"], name="foo")
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(["2 days", pd.NaT, "3 days"], name="foo")
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(["20130102", pd.NaT, "20130105"])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp("20130102")
assert result == expected
result = td + dt
expected = Timestamp("20130102")
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize("freq", ["D", "B"])
def test_timedelta(self, freq):
index = pd.date_range("1/1/2000", periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == "D":
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range("2013", "2014")
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, "D") + Timestamp("2000")
with pytest.raises(OutOfBoundsDatetime):
Timestamp("2000") + pd.to_timedelta(106580, "D")
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], "D") + Timestamp("2000")
with pytest.raises(OverflowError, match=msg):
Timestamp("2000") + pd.to_timedelta([106580], "D")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days")
with pytest.raises(OverflowError, match=msg):
(
pd.to_timedelta([_NaT, "5 days", "1 hours"])
- pd.to_timedelta(["7 seconds", _NaT, "4 hours"])
)
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(["4 days", pd.NaT])
result = pd.to_timedelta(["5 days", pd.NaT]) - Timedelta("1 days")
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, "5 hours"])
result = pd.to_timedelta([pd.NaT, "5 days", "1 hours"]) + pd.to_timedelta(
["7 seconds", pd.NaT, "4 hours"]
)
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range("1", periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(["00:00:01"]))
s2 = pd.to_timedelta(Series(["00:00:02"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(["00:00:01"]).apply(pd.to_timedelta)
df2 = pd.DataFrame(["00:00:02"]).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta("00:00:01")
scalar2 = pd.to_timedelta("00:00:02")
timedelta_NaT = pd.to_timedelta("NaT")
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range("2012-1-1", periods=3, freq="D")
v2 = pd.date_range("2012-1-2", periods=3, freq="D")
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]")
tm.assert_series_equal(rs, xp)
assert rs.dtype == "timedelta64[ns]"
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
# series on the rhs
result = df["A"] - df["A"].shift()
assert result.dtype == "timedelta64[ns]"
result = df["A"] + td
assert result.dtype == "M8[ns]"
# scalar Timestamp on rhs
maxa = df["A"].max()
assert isinstance(maxa, Timestamp)
resultb = df["A"] - df["A"].max()
assert resultb.dtype == "timedelta64[ns]"
# timestamp on lhs
result = resultb + df["A"]
values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
expected = Series(values, name="A")
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df["A"] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A")
tm.assert_series_equal(result, expected)
assert result.dtype == "m8[ns]"
d = datetime(2001, 1, 1, 3, 4)
resulta = df["A"] - d
assert resulta.dtype == "m8[ns]"
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df["A"], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df["A"])
assert resultb.dtype == "M8[ns]"
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df["A"] + td
resultb = resulta - td
tm.assert_series_equal(df["A"], resultb)
assert resultb.dtype == "M8[ns]"
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta("1s")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
# addition
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
tm.assert_series_equal(
timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta
)
tm.assert_series_equal(
NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_timedelta,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timedelta,
nat_series_dtype_timedelta,
)
# multiplication
tm.assert_series_equal(
nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta
)
tm.assert_series_equal(
1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta
)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")]))
tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")]))
tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(["1 day", "2 days"])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + "a"
with pytest.raises(TypeError):
"a" + tdi
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(["-1 days", "-1 days"])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize("freq", [None, "H"])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period("2011-01-01", freq="D")
idx = TimedeltaIndex(["1 hours", "2 hours"], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("tdi_freq", [None, "H"])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
dti = Timestamp("2018-03-07 17:16:40") + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(["1 day", "2 day"])
idx = tm.box_expected(idx, box_with_array)
msg = (
"cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation"
)
with pytest.raises(TypeError, match=msg):
idx - Timestamp("2011-01-01")
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp("2011-01-01", tz=tz)
idx = TimedeltaIndex(["1 day", "2 day"])
expected = DatetimeIndex(["2011-01-02", "2011-01-03"], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp("2012-01-01")
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range("1 day", periods=3)
expected = pd.date_range("2012-01-02", periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D")
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64("NaT")
tdi = timedelta_range("1 day", periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(["59 days", "59 days", "NaT"])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype="uint8")
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize("scalar", [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array, scalar):
box = box_with_array
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize(
"dtype",
[
"int64",
"int32",
"int16",
"uint64",
"uint32",
"uint16",
"uint8",
"float64",
"float32",
"float16",
],
)
@pytest.mark.parametrize(
"vec",
[
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith("float"):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta("5m4s").to_timedelta64(),
],
)
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series(
[timedelta(seconds=1)] * 3
)
assert result.dtype == "m8[ns]"
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = Series([timedelta(seconds=1)] * 3) - Series(
[timedelta(seconds=0)] * 3
)
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(["00:05:03"] * 3))
td2 = pd.to_timedelta("00:05:04")
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series(
[timedelta(seconds=1)] * 3
)
assert result.dtype == "m8[ns]"
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = Series([timedelta(seconds=1)] * 3) - Series(
[timedelta(seconds=0)] * 3
)
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range("2016-01-01", periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize(
"names",
[
(None, None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
],
)
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == "Venkman":
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["0 days", "1 day"], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series(
[Timedelta(hours=3), Timedelta(days=1, hours=4)], name=names[2]
)
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
expected = Series(
[Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=names[2]
)
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == "timedelta64[ns]"
else:
assert result.dtypes[0] == "timedelta64[ns]"
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta("1s")])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta("1s")])
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range("1 days", "10 days")
expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00")
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series(
[pd.offsets.Minute(1), pd.offsets.Second(3), pd.offsets.Hour(2)]
)
expected = Series(
[
timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3),
]
)
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ["Hour", "Minute", "Second", "Day", "Micro", "Milli", "Nano"]:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == "bar":
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] + other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == "bar":
pytest.skip(
"Name propagation for DataFrame does not behave like "
"it does for Index/Series"
)
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex(
[tdi[n] - other[n] for n in range(len(tdi))], freq="infer"
)
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], name=names[1])
expected_add = Series(
[tdi[n] + other[n] for n in range(len(tdi))], name=names[2]
)
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series(
[tdi[n] - other[n] for n in range(len(tdi))], name=names[2]
)
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize("obox", [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array):
# GH#18824
tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range("2013-01-01", "2013-01-03"))
enddate = Series(pd.date_range("2013-03-01", "2013-03-03"))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype="int64")
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype="int64")
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype="int64"))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype="float64")
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize(
"other",
[
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11),
],
ids=lambda x: type(x).__name__,
)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(["1 Day"] * 10)
expected = timedelta_range("1 days", "10 days")
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range("1 days", "10 days", name="foo")
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError, match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match="Cannot divide NaTType by"):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range("1 days", "10 days")
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64("NaT")
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match="Cannot divide"):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Float64Index((np.arange(10) + 1) * 12, name="foo")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"], name="foo")
expected = pd.Float64Index([12, np.nan, 24], name="foo")
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype="m8[h]")
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(["1 days", pd.NaT, "2 days"])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*"
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range("1 days", "10 days", name="foo")
expected = pd.Int64Index((np.arange(10) + 1) * 12, name="foo")
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize(
"scalar_td",
[
timedelta(minutes=10, seconds=7),
Timedelta("10m7s"),
Timedelta("10m7s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(["00:05:03", "00:05:03", pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range("1 ns", "10 ns", periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(["1 ns", "0 ns"] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range("1 Day", "9 days")
tdarr = tm.box_expected(tdi, box_with_array)
expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype="int64"))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize("one", [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match="Cannot divide"):
two / tdser
@pytest.mark.parametrize(
"dtype",
[
"int64",
"int32",
"int16",
"uint64",
"uint32",
"uint16",
"uint8",
"float64",
"float32",
"float16",
],
)
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(dtype)
expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"int64",
"int32",
"int16",
"uint64",
"uint32",
"uint16",
"uint8",
"float64",
"float32",
"float16",
],
)
@pytest.mark.parametrize(
"vector",
[np.array([20, 30, 40]), pd.Index([20, 30, 40]), Series([20, 30, 40])],
ids=lambda x: type(x).__name__,
)
def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
vector = vector.astype(dtype)
expected = Series(["2.95D", "1D 23H 12m", "NaT"], dtype="timedelta64[ns]")
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = (
"true_divide cannot use operands|"
"cannot perform __div__|"
"cannot perform __truediv__|"
"unsupported operand|"
"Cannot divide"
)
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is pd.DataFrame:
expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
@pytest.mark.parametrize(
"names",
[
(None, None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
],
)
def test_td64arr_mul_int_series(self, box_df_fail, names):
# GH#19042 test for correct name attachment
box = box_df_fail # broadcasts along wrong axis, but doesn't raise
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
# TODO: Should we be parametrizing over types for `ser` too?
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(
["0days", "1day", "4days", "9days", "16days"],
dtype="timedelta64[ns]",
name=names[2],
)
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected = tm.box_expected(expected, box)
result = ser * tdi
tm.assert_equal(result, expected)
# The direct operation tdi * ser still needs to be fixed.
result = ser.__rmul__(tdi)
tm.assert_equal(result, expected)
# TODO: Should we be parametrizing over types for `ser` too?
@pytest.mark.parametrize(
"names",
[
(None, None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
],
)
def test_float_series_rdiv_td64arr(self, box_with_array, names):
# GH#19042 test for correct name attachment
# TODO: the direct operation TimedeltaIndex / Series still
# needs to be fixed.
box = box_with_array
tdi = TimedeltaIndex(
["0days", "1day", "2days", "3days", "4days"], name=names[0]
)
ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1])
xname = names[2] if box is not tm.to_array else names[1]
expected = Series(
[tdi[n] / ser[n] for n in range(len(ser))],
dtype="timedelta64[ns]",
name=xname,
)
xbox = box
if box in [pd.Index, tm.to_array] and type(ser) is Series:
xbox = Series
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = ser.__rdiv__(tdi)
if box is pd.DataFrame:
# TODO: Should we skip this case sooner or test something else?
assert result is NotImplemented
else:
tm.assert_equal(result, expected)
class TestTimedeltaArraylikeInvalidArithmeticOps:
def test_td64arr_pow_invalid(self, scalar_td, box_with_array):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = "operate|unsupported|cannot|not supported"
with pytest.raises(TypeError, match=pattern):
scalar_td ** td1
with pytest.raises(TypeError, match=pattern):
td1 ** scalar_td
| bsd-3-clause |
hansomesong/TracesAnalyzer | Plot/Plot_variable_time/Pie_Chart.py | 1 | 3974 | # -*- coding: UTF-8 -*-
# __author__ = 'yueli'
import numpy as np
import matplotlib.pyplot as plt
# The code in this file is adapted from the matplotlib pie demo:
# http://matplotlib.org/1.2.1/examples/pylab_examples/pie_demo.html
# Since the input file for this script no longer exists, it has been superseded by Pie_chart_v4.py
# Import the targeted raw CSV file
rawCSV_file1 = "/Users/yueli/Documents/Codes/TracesAnalyzer/log/comparison_time_liege.csv"
rawCSV_file2 = "/Users/yueli/Documents/Codes/TracesAnalyzer/log/comparison_time_temple.csv"
rawCSV_file3 = "/Users/yueli/Documents/Codes/TracesAnalyzer/log/comparison_time_ucl.csv"
rawCSV_file4 = "/Users/yueli/Documents/Codes/TracesAnalyzer/log/comparison_time_umass.csv"
rawCSV_file5 = "/Users/yueli/Documents/Codes/TracesAnalyzer/log/comparison_time_wiilab.csv"
rawCSV_files = [rawCSV_file1, rawCSV_file2, rawCSV_file3, rawCSV_file4, rawCSV_file5]
negativeReplyCount = 0
printSkippedCount = 0
reConfiguration1 = 0
reConfiguration2 = 0
flapping1 = 0
flapping2 = 0
mobility1 = 0
mobility2 = 0
for rawCSV_file in rawCSV_files:
for line in open(rawCSV_file):
lines = line.split(";")
if lines[0] == "Vantage":
continue
elif lines[4] == "True":
continue
else:
if "NegativeReply" in lines[5]:
print "Negative Reply", lines[1]
negativeReplyCount = negativeReplyCount + 1
elif "PrintSkipped" in lines[5]:
printSkippedCount = printSkippedCount + 1
else:
                # In the five comparison_time_*.csv files, flapping cases have at most locator_count == 2 and at most 10 locators,
                # so lines[6] == 2 and lines[8] == 10 are used as the boundaries between flapping and mobility.
if int(lines[6]) <= 2:
if int(lines[6]) == 1:
if int(lines[8]) <= 11:
if lines[11] == "False":
reConfiguration1 = reConfiguration1 + 1
else:
# print "flapping1 happens in:", lines[1]
flapping1 = flapping1 + 1
else:
mobility1 = mobility1 + 1
else:
if lines[10] == "False":
# print "reConfiguration2 happens in:", lines[1]
reConfiguration2 = reConfiguration2 + 1
else:
# print "flapping2 happens in:", lines[1]
flapping2 = flapping2 + 1
else:
mobility2 = mobility2 + 1
# The slices will be ordered and plotted counter-clockwise.
labels = 'Negative + Normal Reply', 'PrintSkipped + Normal Reply', 'Reconfiguration I', 'Reconfiguration II',\
'Flapping I', 'Flapping II', 'Mobility I', 'Mobility II'
fracs = [negativeReplyCount, printSkippedCount, reConfiguration1, reConfiguration2, flapping1, flapping2, mobility1, mobility2]
print fracs
colors = ['red', 'orange', 'yellow', 'green', 'lightskyblue', 'blue', 'purple', 'yellowgreen']
explode=(0, 0, 0, 0, 0, 0, 0, 0)
# autopct='%1.2f%%' formats each slice percentage with two decimal places
plt.pie(fracs, explode=explode, labels=labels, colors=colors, autopct='%1.2f%%', startangle=345)
# The default startangle is 0, which would start the first slice on the
# positive x-axis; startangle=345 rotates all wedges 345 degrees
# counter-clockwise, so the first slice begins just below the x-axis.
# plt.title('Percentage of each False case', bbox={'facecolor':'0.8', 'pad':5})
plt.title('Percentage of each inconsistent case by the variable of time')
#plt.savefig("/Users/yueli/Documents/Codes/TracesAnalyzer/Plot_new/Plot_variable_time/Pie_chart_2_11.pdf")
plt.show() | gpl-2.0 |
ellisonbg/altair | altair/vegalite/v2/examples/stem_and_leaf.py | 1 | 1273 | """
Stem and Leaf Plot
------------------
This example shows how to make a stem and leaf plot.
"""
# category: other charts
import altair as alt
import pandas as pd
import numpy as np
np.random.seed(42)
# Generating random data
original_data = pd.DataFrame({'samples': np.array(np.random.normal(50, 15, 100), dtype=np.int)})
# Splitting stem and leaf
original_data['stem'] = original_data['samples'].apply(lambda x: str(x)[:-1])
original_data['leaf'] = original_data['samples'].apply(lambda x: str(x)[-1])
original_data.sort_values(by=['stem', 'leaf'], inplace=True)
original_data.reset_index(inplace=True, drop=True)
# Determining leaf position
get_position = lambda x: 1 + pd.Series(range(len(x)))
original_data['position'] = original_data.groupby('stem')\
.apply(get_position)\
.reset_index(drop=True)
# Creating stem and leaf plot
alt.Chart(original_data).mark_text(
align='left',
baseline='middle',
dx=-5
).encode(
alt.X('position:Q',
axis=alt.Axis(title='', ticks=False, labels=False, grid=False)
),
alt.Y('stem:N', axis=alt.Axis(title='', tickSize=0)),
text='leaf:N'
).configure_axis(
labelFontSize=20
).configure_text(
fontSize=20
)
| bsd-3-clause |
potash/scikit-learn | sklearn/grid_search.py | 6 | 38777 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
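# Migration sketch (illustrative, not part of the original module): the
# refactored classes keep the same estimator-facing API and can be imported
# from sklearn.model_selection instead, e.g.
#
#     from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
#     from sklearn.model_selection import ParameterGrid, ParameterSampler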
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
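# Usage sketch (illustrative, not part of the original source): __getitem__
# returns the ind-th combination without materializing the whole grid, which
# is what makes memory-efficient sampling without replacement possible, e.g.
#
#     grid = ParameterGrid({'a': [1, 2, 3], 'b': [True, False]})
#     len(grid)   # 6
#     grid[4]     # {'a': 3, 'b': True}, same as list(grid)[4]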
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
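# Illustrative sketch (not part of the original module): mixing a discrete
# list with a scipy.stats distribution; because a distribution is present,
# sampling is done with replacement.
def _example_parameter_sampler():
    from scipy.stats import expon
    sampler = ParameterSampler({'C': [1, 10, 100], 'gamma': expon(scale=0.1)},
                               n_iter=5, random_state=0)
    return list(sampler)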
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
        Additional parameters passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
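# Illustrative sketch (not part of the original module): evaluating a single
# parameter setting on one hypothetical train/test split; ``check_scoring``
# and ``np`` are the module-level helpers imported above.
def _example_fit_grid_point():
    from sklearn.svm import SVC
    X = np.random.RandomState(0).rand(20, 3)
    y = np.array([0, 1] * 10)
    train, test = np.arange(15), np.arange(15, 20)
    scorer = check_scoring(SVC(), scoring='accuracy')
    return fit_grid_point(X, y, SVC(), {'C': 1.0}, train, test, scorer,
                          verbose=0)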
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets:
        # (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
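# Illustrative sketch (not part of the original module): how the ``iid`` flag
# changes the aggregation performed in ``_fit`` above.  With iid=True each
# fold score is weighted by its number of test samples; with iid=False all
# folds count equally.  The fold scores and sizes below are hypothetical.
def _example_score_aggregation():
    fold_scores = [0.8, 0.9, 1.0]
    fold_sizes = [50, 30, 20]
    weighted = sum(s * n for s, n in zip(fold_scores, fold_sizes))
    iid_score = weighted / float(sum(fold_sizes))             # 0.87
    mean_score = sum(fold_scores) / float(len(fold_scores))   # 0.90
    return iid_score, mean_score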
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
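# Illustrative sketch (not part of the original module): inspecting a fitted
# search.  ``clf`` is assumed to be a GridSearchCV instance on which ``fit``
# has already been called.
def _example_inspect_grid_search(clf):
    summary = [(s.parameters, s.mean_validation_score)
               for s in clf.grid_scores_]
    return clf.best_params_, clf.best_score_, summary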
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
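# Illustrative sketch (not part of the original module): a randomized search
# over a small hypothetical parameter space for an SVM.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn import datasets
    from sklearn.svm import SVC
    iris = datasets.load_iris()
    param_distributions = {'C': expon(scale=10), 'gamma': expon(scale=0.1)}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=8,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_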
| bsd-3-clause |
blbarker/spark-tk | regression-tests/sparktkregtests/testcases/graph/betweenness_centrality_test.py | 4 | 5737 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests betweenness centrality algorithm for graphs"""
import unittest
from sparktkregtests.lib import sparktk_test
class BetweennessCentrality(sparktk_test.SparkTKTestCase):
def setUp(self):
edges = self.context.frame.create(
[(0, 1, 1),
(0, 2, 1),
(2, 3, 2),
(2, 4, 4),
(3, 4, 2),
(3, 5, 4),
(4, 5, 2),
(4, 6, 1)],
["src", "dst", "weights"])
vertices = self.context.frame.create(
[[0], [1], [2], [3], [4], [5], [6]], ["id"])
self.graph = self.context.graph.create(vertices, edges)
def test_default(self):
"""Test default settings"""
result_frame = self.graph.betweenness_centrality()
result = result_frame.to_pandas()
# validate centrality values
expected_value = {
0: 0.333,
1: 0.0,
2: 0.533,
3: 0.1,
4: 0.433,
5: 0.0,
6: 0.0}
for i, row in result.iterrows():
vertex_id = row['id']
self.assertAlmostEqual(
row["betweenness_centrality"],
expected_value[vertex_id],
delta=0.001)
def test_weights_single_shortest_path(self):
"""Tests weighted betweenness when only one shortest path present"""
edges = self.context.frame.create(
[(0, 1, 3), (0, 2, 2),
(0, 3, 6), (0, 4, 4),
(1, 3, 5), (1, 5, 5),
(2, 4, 1), (3, 4, 2),
(3, 5, 1), (4, 5, 4)],
["src", "dst", "weights"])
vertices = self.context.frame.create(
[[0], [1], [2], [3], [4], [5]], ["id"])
graph = self.context.graph.create(vertices, edges)
# validate against values from networkx betweenness centrality
result_frame = graph.betweenness_centrality("weights", False)
result = result_frame.to_pandas()
expected_values = {
0: 2.0,
1: 0.0,
2: 4.0,
3: 3.0,
4: 4.0,
5: 0.0}
for i, row in result.iterrows():
vertex_id = row['id']
self.assertAlmostEqual(
row["betweenness_centrality"],
expected_values[vertex_id],
delta=0.1)
def test_weights(self):
"""Test betweenness with weighted cost"""
result_frame = self.graph.betweenness_centrality("weights", False)
# validate betweenness centrality values
expected_values = {
1: 0.0,
0: 5.0,
5: 0.0,
6: 0.0,
2: 8.0,
3: 5.0,
4: 7.5}
result = result_frame.to_pandas()
for i, row in result.iterrows():
vertex_id = row['id']
self.assertAlmostEqual(
row["betweenness_centrality"],
expected_values[vertex_id],
delta=0.1)
def test_disconnected_edges(self):
"""Test betweenness on graph with disconnected edges"""
edges = self.context.frame.create(
[['a', 'b'], ['a', 'c'],
['c', 'd'], ['c', 'e'],
['f', 'g'], ['g', 'h']],
['src', 'dst'])
vertices = self.context.frame.create(
[['a'], ['b'], ['c'], ['d'], ['e'], ['f'], ['g'], ['h']],
['id'])
graph = self.context.graph.create(vertices, edges)
result_frame = graph.betweenness_centrality(normalize=False)
# validate betweenness centrality values
expected_values = {
'a': 3.0,
'b': 0.0,
'c': 5.0,
'd': 0.0,
'e': 0.0,
'f': 0.0,
'g': 1.0,
'h': 0.0}
result = result_frame.to_pandas()
for i, row in result.iterrows():
vertex_id = row['id']
self.assertAlmostEqual(
row["betweenness_centrality"],
expected_values[vertex_id],
delta=0.1)
def test_normalize(self):
"""Test unnomallized betweenness crentrality"""
result_frame = self.graph.betweenness_centrality(normalize=False)
result = result_frame.to_pandas()
# validate centrality values
expected_values = {
0: 5.0,
1: 0.0,
2: 8.0,
3: 1.5,
4: 6.5,
5: 0.0,
6: 0.0}
for i, row in result.iterrows():
vertex_id = row['id']
self.assertAlmostEqual(
row["betweenness_centrality"],
expected_values[vertex_id],
delta=0.1)
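    # Note added for clarity (not part of the original tests): the normalized
    # values in test_default are the unnormalized values in test_normalize
    # divided by (n-1)*(n-2)/2. With n=7 vertices that factor is 15,
    # e.g. 5.0 / 15 ~= 0.333.
    @staticmethod
    def _normalization_factor(n_vertices):
        """Hypothetical helper illustrating the normalization constant."""
        return (n_vertices - 1) * (n_vertices - 2) / 2.0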
def test_bad_weights_column_name(self):
"""Should throw exception when bad weights column name given"""
with self.assertRaisesRegexp(
Exception, "Field \"BAD\" does not exist"):
self.graph.betweenness_centrality("BAD")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
sightmachine/SimpleCV2 | SimpleCV/examples/machine-learning/machine-learning_nuts-vs-bolts.py | 12 | 2782 | '''
This example uses scikit-learn to do a binary classification of images
of nuts vs. bolts. Only the area, height, and width are used to classify
the actual images, but the data is extracted from the images using blobs.
This is a very crude example that could easily be built upon; it is just
meant to give an introductory example of using machine learning.
The data set should download automatically; if not, you can get it from:
https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip
'''
print __doc__
from SimpleCV import *
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
import numpy as np
#Download the dataset
machine_learning_data_set = 'https://github.com/downloads/sightmachine/SimpleCV/nuts_bolts.zip'
data_path = download_and_extract(machine_learning_data_set)
print 'Test Images Downloaded at:', data_path
display = Display((800,600)) #Display to show the images
target_names = ['bolt', 'nut']
print 'Loading Bolts for Training'
bolts = ImageSet(data_path + '/data/supervised/bolts') #Load Bolts for training
bolt_blobs = [b.findBlobs()[0] for b in bolts] #extract the blobs for our features
tmp_data = [] #array to store data features
tmp_target = [] #array to store targets
for b in bolt_blobs: #Format Data for SVM
tmp_data.append([b.area(), b.height(), b.width()])
tmp_target.append(0)
print 'Loading Nuts for Training'
nuts = ImageSet(data_path + '/data/supervised/nuts')
nut_blobs = [n.invert().findBlobs()[0] for n in nuts]
for n in nut_blobs:
tmp_data.append([n.area(), n.height(), n.width()])
tmp_target.append(1)
dataset = np.array(tmp_data)
targets = np.array(tmp_target)
print 'Training Machine Learning'
clf = LinearSVC()
clf = clf.fit(dataset, targets)
clf2 = LogisticRegression().fit(dataset, targets)
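def classify_blob(blob, classifier=clf, prob_classifier=clf2):
    '''Hypothetical helper (not part of the original example): classify a
    single blob from its area, height and width, mirroring the loops below.'''
    features = [blob.area(), blob.height(), blob.width()]
    name = target_names[classifier.predict(features)[0]]
    probability = prob_classifier.predict_proba(features)[0]
    return name, probability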
print 'Running prediction on bolts now'
untrained_bolts = ImageSet(data_path + '/data/unsupervised/bolts')
unbolt_blobs = [b.findBlobs()[0] for b in untrained_bolts]
for b in unbolt_blobs:
ary = [b.area(), b.height(), b.width()]
name = target_names[clf.predict(ary)[0]]
probability = clf2.predict_proba(ary)[0]
img = b.image
img.drawText(name)
img.save(display)
print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
print 'Running prediction on nuts now'
untrained_nuts = ImageSet(data_path + '/data/unsupervised/nuts')
unnut_blobs = [n.invert().findBlobs()[0] for n in untrained_nuts]
for n in unnut_blobs:
ary = [n.area(), n.height(), n.width()]
name = target_names[clf.predict(ary)[0]]
probability = clf2.predict_proba(ary)[0]
img = n.image
img.drawText(name)
img.save(display)
print "Predicted:",name,", Guess:",probability[0], target_names[0],",", probability[1], target_names[1]
| bsd-3-clause |
Gezerj/Data-Analysis | Task-Problems/TASK 6.py | 1 | 1263 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 04 10:57:34 2017
@author: Gerwyn
"""
from __future__ import division
import numpy as np
import scipy.stats as sc
import matplotlib.pyplot as plt
P = np.array([79, 82, 85, 88, 90])
T = np.array([8, 17, 30, 37, 52])
n = len(T)
N = 5000
Tmin = -500
Tmax = 0
sigma_T = 2.0
A = np.linspace(Tmin, Tmax, N)
B = np.linspace(3, 4, N)
X_2 = np.zeros((len(A), len(B)))
def X_sum(A, B):
Num = (T - A - B*P)**2.0
Den = sigma_T**2.0
Sum = np.sum(Num/Den)
return Sum
for i in range(len(A)):
for j in range(len(B)):
X_2[i, j] = X_sum(A[i], B[j])
for i in range(len(A)):
found = False
for j in range(len(B)):
if X_2[i, j] == np.min(X_2):
print A[i]
print B[j]
found = True
break
else:
continue
if found:
break
Analytical_A = ((np.sum(P**2.0)*np.sum(T)) - (np.sum(P)*np.sum(P*T)))/((n*np.sum(P**2.0)) - (np.sum(P))**2.0)
Analytical_B = ((n*np.sum(T*P)) - (np.sum(P)*np.sum(T)))/((n*np.sum(P**2.0)) - (np.sum(P))**2.0)
print Analytical_A
print Analytical_B
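# Cross-check (added sketch, not part of the original task): an ordinary
# least-squares fit with numpy should reproduce the analytical values; the
# constant sigma_T drops out of the estimate. np.polyfit returns the slope
# first, then the intercept.
B_fit, A_fit = np.polyfit(P, T, 1)
print A_fit
print B_fit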
plt.figure()
plt.plot(P, T, '.')
plt.ylabel('Temperature (C)')
plt.xlabel('Pressure (mm)')
plt.show()
| gpl-3.0 |
CVML/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the ``n_samples`` parameter to a small
    value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
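# Illustrative sketch (not part of the original module): the estimate grows
# with ``quantile``, because each point then considers its
# ``int(n_samples * quantile)`` nearest neighbours and the distance to the
# farthest of them can only increase.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2)
    return [estimate_bandwidth(X, quantile=q) for q in (0.1, 0.3, 0.5)]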
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
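# Illustrative sketch (not part of the original module): the functional
# interface on a small synthetic two-blob data set.
def _example_mean_shift():
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    bandwidth = estimate_bandwidth(X, quantile=0.3)
    cluster_centers, labels = mean_shift(X, bandwidth=bandwidth,
                                         bin_seeding=True)
    return cluster_centers, labels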
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
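# Illustrative sketch (not part of the original module): with bin_size=1.0
# the three points below occupy two grid cells, so two seeds are returned
# (the rounded grid coordinates scaled back by bin_size).
def _example_bin_seeds():
    X = np.array([[0.1, 0.2], [0.2, 0.1], [5.1, 4.9]])
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=1)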
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
andim/evolimmune | figSIaltphases/figure-SIaltphases.py | 1 | 6607 |
# coding: utf-8
# # Influence of parameter choice on the phase diagram
# To study to what extent the phase diagram depends on the cost of infection $c_{\rm inf}$, and on the trade-off shapes $c_{\rm def}(c_{\rm con}), c_{\rm uptake}(p_{\rm uptake})$, we plot the phase diagram for a number of different choices in the following.
# Import packages.
# In[6]:
from cycler import cycler
import sys
sys.path.append('../lib')
import numpy as np
import matplotlib.colors
import matplotlib.pyplot as plt
from matplotlib import transforms, gridspec, ticker
import palettable
import shapely.ops
import plotting
import evolimmune
import misc
import analysis
plt.style.use(['paper'])
plt.rc('lines', linewidth=1.0)
plt.rc('axes', labelpad=1.0)
eps = 1e-8
# Read in and summarize data
# In[7]:
df = analysis.loadnpz('data/phases.npz')
analysis.intelligent_describe(df, nunique=10)
dfg = df.groupby(['lambda_', 'mus', 'cup'])
nparams = len(dfg)
# define colors used in plot and phasenames
# In[8]:
black = matplotlib.rcParams['text.color']
colors = np.asarray(palettable.colorbrewer.qualitative.Set3_6.mpl_colors)[[4, 0, 2, 3, 5, 1]]
strategies_s = ['a', 'p', 'o', 'i', 'm', 'c']
color_dict = dict(zip(strategies_s, colors))
linecolors = palettable.colorbrewer.qualitative.Dark2_6.mpl_colors
plt.rc('axes', prop_cycle=cycler('color', linecolors))
phasenames = misc.DefaultIdentityDict(o='$i$', i='$ib$')
# Define plotting functions
# In[9]:
def plotmus(ax, musstr, alpha=1.0, label=True):
epsilon = np.linspace(0.0, 1.0, 100)
mus = evolimmune.mus_from_str(musstr)
mu1, mu2 = mus(epsilon)
if label:
ax.plot(mu1, mu2, c=linecolors[1], alpha=alpha, label='defense')
else:
ax.plot(mu1, mu2, c=linecolors[1], alpha=alpha)
ax.plot(mu1[0], mu2[0], 'o', markeredgecolor='none', markersize=3, c=linecolors[1], alpha=alpha)
def plotstatecosts(ax, musstr, musstrref=None, lambda_=None):
if lambda_:
ax.text(1, 1, '${0}={1}$'.format(r'c_{\rm inf}', lambda_),
transform=ax.transAxes, ha='right', va='top')
if musstrref is not None:
plotmus(ax, musstrref, alpha=0.25, label=False)
plotmus(ax, musstr)
ax.set_xlabel(evolimmune.varname_to_tex['cconstitutive'])
ax.set_ylabel(evolimmune.varname_to_tex['cdefense'])
ax.set_xlim(0, 1.5)
ax.set_ylim(0, 2.7)
ax.locator_params(nbins=3)
def plotcup(ax, cupstr, cupstrref=None):
pup = np.linspace(0.0, 0.2, 100)
if cupstrref is not None:
cup = evolimmune.cup_from_str(cupstrref)
ax.plot(pup, cup(pup), c=linecolors[2], alpha=.25)
cup = evolimmune.cup_from_str(cupstr)
ax.plot(pup, cup(pup), c=linecolors[2])
ax.set_xlabel(evolimmune.varname_to_tex['pup'])
ax.set_ylabel(evolimmune.varname_to_tex['cup'])
ax.set_ylim(0, 0.1)
ax.locator_params(nbins=1)
# Putting it all together into one figure
# In[10]:
fig = plt.figure(figsize=(6, 7))
nrow = 4
nsubrow = 3
height_ratios = [1, 10, 10]
gsglobal = gridspec.GridSpec(4, 2)
import param1
lambdaref, musref, cupref = param1.lambda_, param1.mus, param1.cup
label_axes = []
for i in range(1, 9):
p = __import__('param{}'.format(i))
lambda_ = p.lambda_
mus = p.mus
cup = p.cup
dfg = df[(df.mus==mus)&(df.cup==cup)&(df.lambda_==lambda_)]
print lambda_, mus, cup
gs = gridspec.GridSpecFromSubplotSpec(3, 2, subplot_spec=gsglobal[(i-1)%nrow, (i-1)//nrow],
width_ratios=[1, 2], height_ratios=[1, 30, 20],
hspace=1.5, wspace=0.6)
axlambda = fig.add_subplot(gs[0, 0])
axlambda.text(0.5, -3.0, '${0}={1}$'.format(r'c_{\rm inf}', lambda_),
transform=axlambda.transAxes, ha='center', va='top')
axlambda.axis('off')
axmu = fig.add_subplot(gs[1, 0])
plotstatecosts(axmu, mus, musref)
axcup = fig.add_subplot(gs[2, 0])
plotcup(axcup, cup, cupref)
for ax in [axmu, axcup]:
plotting.despine(ax)
axm = fig.add_subplot(gs[:, 1])
try:
polygons = evolimmune.polygons_from_boundaries(dfg, yconv=evolimmune.to_tau)
phases = evolimmune.phases_from_polygons(polygons)
except:
pass
else:
for phasename, phase in phases.iteritems():
try:
axm.add_patch(analysis.shapely_to_mpl(phase, ec='None',
fc=color_dict[phasename],
lw=1.0))
phaset = shapely.ops.transform(lambda x, y, z=None: (x, np.log(y+eps)), phase)
axm.text(phaset.centroid.x, np.exp(phaset.centroid.y),
r'$\mathbf{%s}$'%phasenames[phasename][1:-1],
ha='center', va='center')
except:
pass
axm.set_ylim(evolimmune.to_tau(df.aenv.min()), evolimmune.to_tau(df.aenv.max()))
axm.set_yscale('log')
axm.yaxis.set_major_formatter(ticker.ScalarFormatter())
axm.set_xlabel('$\pi_{env}$')
axm.set_ylabel(r'$\tau_{env}$')
axm.grid(which='major', alpha=0.75)
axm.grid(which='minor', lw=0.4, alpha=0.5)
axm.set_axisbelow(False)
plotting.despine(axm, spines='all')
label_axes.append((i, axlambda))
label_axes = [ax for i, ax in sorted(label_axes)]
plotting.label_axes(label_axes, xy=(-0.6, 1.0), fontsize='large', va='top')
gsglobal.tight_layout(fig, h_pad=1.0, w_pad=2.0)
fig.savefig('SIaltphases.pdf')
fig.savefig('SIaltphases.svg')
# Fig.S2: **Influence of parameter choice on the phase diagram presented in Fig. 2.**
# For every panel the parameter choices are shown on the left and the phase boundaries between **p**roto-adaptive, **i**nnate, **i**nnate **b**et hedging, **m**ixed and **C**RISPR-like strategies are shown on the right. As a reference, lines in lighter color show trade-off and uptake cost for parameter set used in Fig. 2.
# **(A)** Phase diagram for parameters used in Fig. 2.
# **(B)** More expensive active acquisition ($c_{\rm uptake}$ multiplied by a factor of two).
# **(C)** Different functional form for cost of active acqusition: $c_{\rm uptake} = 0.05 \times p_{\rm uptake} + 2 \times p_{\rm uptake}^2$.
# **(D)** More permissive state-dependent costs (costs multiplied by a factor of 0.5).
# **(E)** Less permissive state-dependent costs (costs multiplied by a factor of 1.5).
# **(F)** Higher cost of infection.
# **(G)** Higher cost of immune protection.
# **(H)** Different functional form for cost trade-off, $c_{\rm defense} = 1.4-0.6\times c_{\rm constitutive}+0.2 \times c_{\rm constitutive}^2$
# In[ ]:
| mit |
Clyde-fare/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
    fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
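# A minimal usage sketch for the estimator above (not part of the library;
# the synthetic dataset and the LinearSVC base estimator are illustrative
# assumptions only):
#
#     from sklearn.datasets import make_classification
#     from sklearn.svm import LinearSVC
#
#     X, y = make_classification(n_samples=300, n_features=20, random_state=0)
#     clf = CalibratedClassifierCV(LinearSVC(random_state=0),
#                                  method='sigmoid', cv=3)
#     clf.fit(X, y)
#     proba = clf.predict_proba(X)    # shape (300, 2); each row sums to 1
#     labels = clf.predict(X)         # argmax over the calibrated probabilities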
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float64).tiny  # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
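# Sketch of how the returned slope/intercept pair is used downstream (this
# mirrors _SigmoidCalibration.predict below; the decision values are made up):
#
#     df = np.array([-2.0, 0.0, 3.0])          # raw decision-function outputs
#     y = np.array([0, 0, 1])
#     a, b = _sigmoid_calibration(df, y)
#     proba = 1. / (1. + np.exp(a * df + b))   # calibrated P(y = 1 | df)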
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
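# A minimal usage sketch (values are illustrative): the two returned arrays,
# plotted against each other, give a reliability diagram.
#
#     y_true = np.array([0, 0, 0, 1, 1, 1])
#     y_prob = np.array([0.1, 0.4, 0.35, 0.8, 0.65, 0.9])
#     prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=3)
#     # plt.plot(prob_pred, prob_true, 'o-') then compares the mean predicted
#     # probability per bin with the observed fraction of positives.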
| bsd-3-clause |
ottermegazord/ottermegazord.github.io | sortify-master/seaborn/rcmod.py | 3 | 16173 | """Functions that alter the matplotlib rc dictionary on the fly."""
from distutils.version import LooseVersion
import functools
import numpy as np
import matplotlib as mpl
from . import palettes, _orig_rc_params
mpl_ge_150 = LooseVersion(mpl.__version__) >= '1.5.0'
__all__ = ["set", "reset_defaults", "reset_orig",
"axes_style", "set_style", "plotting_context", "set_context",
"set_palette"]
_style_keys = (
"axes.facecolor",
"axes.edgecolor",
"axes.grid",
"axes.axisbelow",
"axes.linewidth",
"axes.labelcolor",
"figure.facecolor",
"grid.color",
"grid.linestyle",
"text.color",
"xtick.color",
"ytick.color",
"xtick.direction",
"ytick.direction",
"xtick.major.size",
"ytick.major.size",
"xtick.minor.size",
"ytick.minor.size",
"legend.frameon",
"legend.numpoints",
"legend.scatterpoints",
"lines.solid_capstyle",
"image.cmap",
"font.family",
"font.sans-serif",
)
_context_keys = (
"figure.figsize",
"font.size",
"axes.labelsize",
"axes.titlesize",
"xtick.labelsize",
"ytick.labelsize",
"legend.fontsize",
"grid.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.markersize",
"lines.markeredgewidth",
"xtick.major.width",
"ytick.major.width",
"xtick.minor.width",
"ytick.minor.width",
"xtick.major.pad",
"ytick.major.pad"
)
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
set_palette(palette, color_codes=color_codes)
if rc is not None:
mpl.rcParams.update(rc)
def reset_defaults():
"""Restore all RC params to default settings."""
mpl.rcParams.update(mpl.rcParamsDefault)
def reset_orig():
"""Restore all RC params to original settings (respects custom rc)."""
mpl.rcParams.update(_orig_rc_params)
def axes_style(style=None, rc=None):
"""Return a parameter dict for the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
This function returns an object that can be used in a ``with`` statement
to temporarily change the style parameters.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> st = axes_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
>>> import matplotlib.pyplot as plt
>>> with axes_style("white"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_style : set the matplotlib parameters for a seaborn theme
plotting_context : return a parameter dict to scale plot elements
color_palette : define the color palette for a plot
"""
if style is None:
style_dict = {k: mpl.rcParams[k] for k in _style_keys}
elif isinstance(style, dict):
style_dict = style
else:
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
if style not in styles:
raise ValueError("style must be one of %s" % ", ".join(styles))
# Define colors here
dark_gray = ".15"
light_gray = ".8"
# Common parameters
style_dict = {
"figure.facecolor": "white",
"text.color": dark_gray,
"axes.labelcolor": dark_gray,
"legend.frameon": False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.color": dark_gray,
"ytick.color": dark_gray,
"axes.axisbelow": True,
"image.cmap": "Greys",
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "Liberation Sans",
"Bitstream Vera Sans", "sans-serif"],
"grid.linestyle": "-",
"lines.solid_capstyle": "round",
}
# Set grid on or off
if "grid" in style:
style_dict.update({
"axes.grid": True,
})
else:
style_dict.update({
"axes.grid": False,
})
# Set the color of the background, spines, and grids
if style.startswith("dark"):
style_dict.update({
"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"grid.color": "white",
})
elif style == "whitegrid":
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": light_gray,
"axes.linewidth": 1,
"grid.color": light_gray,
})
elif style in ["white", "ticks"]:
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": dark_gray,
"axes.linewidth": 1.25,
"grid.color": light_gray,
})
# Show or hide the axes ticks
if style == "ticks":
style_dict.update({
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 3,
"ytick.minor.size": 3,
})
else:
style_dict.update({
"xtick.major.size": 0,
"ytick.major.size": 0,
"xtick.minor.size": 0,
"ytick.minor.size": 0,
})
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _style_keys}
style_dict.update(rc)
# Wrap in an _AxesStyle object so this can be used in a with statement
style_object = _AxesStyle(style_dict)
return style_object
def set_style(style=None, rc=None):
"""Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> set_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
See Also
--------
axes_style : return a dict of parameters or use in a ``with`` statement
to temporarily set the style.
set_context : set parameters to scale plot elements
set_palette : set the default color palette for figures
"""
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"figure.figsize": np.array([8, 5.5]),
"font.size": 12,
"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
"grid.linewidth": 1,
"lines.linewidth": 1.75,
"patch.linewidth": .3,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == "1.4.2":
context_dict["lines.markeredgewidth"] = 0.01
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
class _RCAesthetics(dict):
def __enter__(self):
rc = mpl.rcParams
self._orig = {k: rc[k] for k in self._keys}
self._set(self)
def __exit__(self, exc_type, exc_value, exc_tb):
self._set(self._orig)
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
class _AxesStyle(_RCAesthetics):
"""Light wrapper on a dict to set style temporarily."""
_keys = _style_keys
_set = staticmethod(set_style)
class _PlottingContext(_RCAesthetics):
"""Light wrapper on a dict to set context temporarily."""
_keys = _context_keys
_set = staticmethod(set_context)
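# Because the style/context dicts are wrapped in the classes above, both
# axes_style() and plotting_context() also work as decorators, not only in
# ``with`` blocks.  A small sketch (the plotting function is hypothetical):
#
#     @axes_style("whitegrid")
#     @plotting_context("paper", font_scale=1.2)
#     def make_figure(data):
#         ...  # matplotlib drawing done here sees the temporary rc settings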
def set_palette(palette, n_colors=None, desat=None, color_codes=False):
"""Set the matplotlib color cycle using a seaborn palette.
Parameters
----------
palette : hls | husl | matplotlib colormap | seaborn color palette
Palette definition. Should be something that :func:`color_palette`
can process.
n_colors : int
Number of colors in the cycle. The default number of colors will depend
on the format of ``palette``, see the :func:`color_palette`
documentation for more information.
desat : float
Proportion to desaturate each color by.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
Examples
--------
>>> set_palette("Reds")
>>> set_palette("Set1", 8, .75)
See Also
--------
color_palette : build a color palette or set the color cycle temporarily
in a ``with`` statement.
set_context : set parameters to scale plot elements
set_style : set the default parameters for figure style
"""
colors = palettes.color_palette(palette, n_colors, desat)
if mpl_ge_150:
from cycler import cycler
cyl = cycler('color', colors)
mpl.rcParams['axes.prop_cycle'] = cyl
else:
mpl.rcParams["axes.color_cycle"] = list(colors)
mpl.rcParams["patch.facecolor"] = colors[0]
if color_codes:
palettes.set_color_codes(palette)
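# A short sketch of the resulting matplotlib rc state (assumes matplotlib
# >= 1.5, where the color cycle is stored under 'axes.prop_cycle'):
#
#     set_palette("husl", 6)
#     cyl = mpl.rcParams['axes.prop_cycle']
#     colors = [d['color'] for d in cyl]    # the six husl colors, in cycle order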
| mit |
wdurhamh/statsmodels | statsmodels/examples/tsa/ex_var.py | 33 | 1280 |
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
# some example data
mdata = sm.datasets.macrodata.load().data
mdata = mdata[['realgdp','realcons','realinv']]
names = mdata.dtype.names
data = mdata.view((float,3))
use_growthrate = False #True #False
if use_growthrate:
data = 100 * 4 * np.diff(np.log(data), axis=0)
model = VAR(data, names=names)
res = model.fit(4)
nobs_all = data.shape[0]
#in-sample 1-step ahead forecasts
fc_in = np.array([np.squeeze(res.forecast(model.y[t-20:t], 1))
for t in range(nobs_all-6,nobs_all)])
print(fc_in - res.fittedvalues[-6:])
#out-of-sample 1-step ahead forecasts
fc_out = np.array([np.squeeze(VAR(data[:t]).fit(2).forecast(data[t-20:t], 1))
for t in range(nobs_all-6,nobs_all)])
print(fc_out - data[nobs_all-6:nobs_all])
print(fc_out - res.fittedvalues[-6:])
#out-of-sample h-step ahead forecasts
h = 2
fc_out = np.array([VAR(data[:t]).fit(2).forecast(data[t-20:t], h)[-1]
for t in range(nobs_all-6-h+1,nobs_all-h+1)])
print(fc_out - data[nobs_all-6:nobs_all]) #out-of-sample forecast error
print(fc_out - res.fittedvalues[-6:])
import matplotlib.pyplot as plt
res.plot_forecast(20)
#plt.show()
| bsd-3-clause |
QInfer/python-qinfer | doc/source/conf.py | 3 | 12884 | # -*- coding: utf-8 -*-
#
# QInfer documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 14 21:12:57 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Monkey patch in a field type for columns.
# try:
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.domains.python import PythonDomain, PyObject, l_, PyField, PyTypedField
PyObject.doc_field_types += [
GroupedField('modelparam', label='Model Parameters', names=('modelparam', ), can_collapse=True,
rolename='math'
),
PyTypedField('expparam',
label=l_('Experiment Parameters'), names=('expparam', ), can_collapse=False,
rolename='obj'
),
PyField('scalar-expparam',
label=l_('Experiment Parameter'), names=('scalar-expparam', ),
has_arg=True, rolename='obj'
),
GroupedField('columns', label=l_('Columns'), names=('column', ), can_collapse=True),
]
# except:
# pass
###############################################################################
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src'))
# The LaTeX preamble is placed here so that it can be used both by pngmath
# and by the LaTeX output plugin.
with open('abstract.txt', 'r') as f:
abstract = f.read()
preamble = r"""
\usepackage{amsfonts}
\usepackage{bbm}
\usepackage[bold]{hhtensor}
\newcommand{\T}{\mathrm{T}}
\newcommand{\Tr}{\mathrm{Tr}}
\newcommand{\ident}{\mathbbm{1}}
\newcommand{\ave}{\mathrm{ave}}
\newcommand{\ii}{\mathrm{i}}
\newcommand{\expect}{\mathbb{E}}
\usepackage{braket}
\makeatletter
\renewcommand{\maketitle}{%
\begin{titlepage}%
\let\footnotesize\small
\let\footnoterule\relax
\rule{\textwidth}{1pt}%
\begingroup
% These \defs are required to deal with multi-line authors; it
% changes \\ to ', ' (comma-space), making it pass muster for
% generating document info in the PDF file.
\def\\{, }
\def\and{and }
\pdfinfo{
/Author (\@author)
/Title (\@title)
}
\endgroup
\begin{flushright}%
\sphinxlogo%
{\rm\Huge\py@HeaderFamily \@title \par}%
% {\em\LARGE\py@HeaderFamily \py@release\releaseinfo \par}
\vfill
{\LARGE\py@HeaderFamily
\begin{tabular}[t]{c}
\@author
\end{tabular}
\par}
\vfill
{\large
\@date \par
\vfill
\py@authoraddress \par
}%
{\bf\sffamily ABSTRACT }
ABSTRACT_HERE%
\vfill
\end{flushright}%\par
\@thanks
\end{titlepage}%
%\cleardoublepage%
\setcounter{footnote}{0}%
\let\thanks\relax\let\maketitle\relax
%\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
}
\makeatother
""".replace("ABSTRACT_HERE", abstract)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode', 'sphinx.ext.mathjax', 'sphinx.ext.extlinks',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'QInfer'
copyright = u'2012, Christopher Ferrie and Christopher Granade'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0b4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "obj"
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['qinfer']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'QInferdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'QInfer.tex', u'QInfer: Bayesian Inference for Quantum Information',
u'Christopher Granade and Christopher Ferrie', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = preamble
# In Sphinx 1.5, this now appears as latex_elements, so we pack the
# preamble that way, too.
latex_elements = {
'preamble': preamble
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# latex_elements = {
# 'maketitle': r"""
# \begin{abstract}
# Lorem ipsum
# \end{abstract}
# \maketitle
# """
# }
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'qinfer', u'QInfer Documentation',
[u'Christopher Ferrie and Christopher Granade'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'QInfer'
epub_author = u'Christopher Ferrie and Christopher Granade'
epub_publisher = u'Christopher Ferrie and Christopher Granade'
epub_copyright = u'2012, Christopher Ferrie and Christopher Granade'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
## EXTLINKS CONFIGURATION ######################################################
extlinks = {
'arxiv': ('http://arxiv.org/abs/%s', 'arXiv:'),
'doi': ('https://dx.doi.org/%s', 'doi:'),
'example_nb': ('https://nbviewer.jupyter.org/github/qinfer/qinfer-examples/blob/master/%s.ipynb', ''),
'hdl': ('https://hdl.handle.net/%s', 'hdl:')
}
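# With the mapping above, reST markup such as :arxiv:`1207.1655` or
# :doi:`10.1000/xyz123` renders as a prefixed external link; the identifiers
# here are illustrative only.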
## OTHER CONFIGURATION PARAMETERS ##############################################
plot_pre_code = """
import numpy as np
from qinfer import *
import matplotlib.pyplot as plt
try: plt.style.use('ggplot')
except: pass
"""
plot_include_source = True
plot_formats = [
'svg', 'pdf',
('hires.png', 250),
('png', 125)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/3/': None,
'numpy': ('https://docs.scipy.org/doc/numpy',None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference',None),
'IPython': ('https://ipython.org/ipython-doc/stable/', None),
'ipyparallel': ('https://ipyparallel.readthedocs.io/en/latest/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
# NB: change this to 3.2.0 when that is released, as we will need random object
# support from that version.
'qutip': ('http://qutip.org/docs/3.1.0/', None)
}
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'undoc-members']
pngmath_latex_preamble = preamble
doctest_global_setup = '''
from __future__ import division, print_function
import numpy as np
'''
| bsd-3-clause |
soundcloud/essentia | src/examples/tutorial/essentia_tutorial.py | 10 | 6577 | # Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
"""Demo of Essentia 'standard' mode.
This first demo will show how to use Essentia in standard mode.
This will require a little bit of knowledge of python (not that much!) and
will look like an interactive session in matlab.
We will have a look at some basic functionality:
- how to load an audio
- how to perform some numerical operations, such as FFT et al.
- how to plot results
- how to output results to a file
To run this demo interactively, open IPython and type in the following commands:
from IPython.lib.demo import Demo
essentia_demo = Demo('essentia_tutorial.py')
Type the command
essentia_demo()
to show and execute each block of the demo. Each block of code will be printed to
the screen before it is run. This is another nifty feature of the IPython
interpreter. As we go along the demo, we will also be looking at a few IPython
features that make your life easier.
So, let's start!
"""
# <demo> --- stop ---
# first, we need to import our essentia module. It is aptly named 'essentia'!
import essentia
# as there are 2 operating modes in essentia which have the same algorithms,
# these latter are dispatched into 2 submodules:
import essentia.standard
import essentia.streaming
# let's have a look at what's in there
print dir(essentia.standard)
# <demo> --- stop ---
# let's define a small utility function
def play(audiofile):
import os, sys
# NB: this only works with linux!! mplayer rocks!
if sys.platform == 'linux2':
os.system('mplayer %s' % audiofile)
else:
print 'Not playing audio...'
# So, first things first, let's load an audio
# to make sure it's not a trick, let's show the original "audio" to you:
play('../../../test/audio/recorded/dubstep.wav')
# <demo> --- stop ---
# Essentia has a selection of audio loaders:
#
# - AudioLoader: the basic one, returns the audio samples, sampling rate and number of channels
# - MonoLoader: which returns audio, down-mixed and resampled to a given sampling rate
# - EasyLoader: a MonoLoader which can optionally trim start/end slices and rescale according
# to a ReplayGain value
# - EqloudLoader: an EasyLoader that applies an equal-loudness filtering on the audio
#
# we start by instantiating the audio loader:
loader = essentia.standard.MonoLoader(filename = '../../../test/audio/recorded/dubstep.wav')
# and then we actually perform the loading:
audio = loader()
# <demo> --- stop ---
# OK, let's make sure the loading process actually worked
from pylab import *
plot(audio[1*44100:2*44100])
show()
# <demo> --- stop ---
# So, let's get down to business:
# Let's say we want to analyze the audio frame by frame, and we want to compute
# the MFCC for each frame. We will need the following algorithms:
# Windowing, FFT, MFCC
from essentia.standard import *
w = Windowing(type = 'hann')
spectrum = Spectrum() # FFT() would give the complex FFT, here we just want the magnitude spectrum
mfcc = MFCC()
help(MFCC)
# <demo> --- stop ---
# once algorithms have been instantiated, they work like functions:
frame = audio[5*44100 : 5*44100 + 1024]
spec = spectrum(w(frame))
plot(spec)
show()
# <demo> --- stop ---
# let's try to compute the MFCCs for all the frames in the audio:
mfccs = []
frameSize = 1024
hopSize = 512
for fstart in range(0, len(audio)-frameSize, hopSize):
frame = audio[fstart:fstart+frameSize]
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
# and plot them...
# as this is a 2D array, we need to use imshow() instead of plot()
imshow(mfccs, aspect = 'auto')
show()
# <demo> --- stop ---
# and let's do it in a more essentia-like way:
mfccs = []
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512):
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
# transpose to have it in a better shape
mfccs = essentia.array(mfccs).T
imshow(mfccs[1:,:], aspect = 'auto')
show()
# <demo> --- stop ---
# Introducing the Pool: a good-for-all container
#
# A Pool can contain any type of values (easy in Python, not as much in C++ :-) )
# They need to be given a name, which represent the full path to these values;
# dot '.' characters are used as separators. You can think of it as a directory
# tree, or as namespace(s) + local name.
#
# Examples of valid names are: bpm, lowlevel.mfcc, highlevel.genre.rock.probability, etc...
# So let's redo the previous using a Pool
pool = essentia.Pool()
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512):
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
pool.add('lowlevel.mfcc', mfcc_coeffs)
pool.add('lowlevel.mfcc_bands', mfcc_bands)
imshow(pool['lowlevel.mfcc'].T[1:,:], aspect = 'auto')
figure()
# Let's plot mfcc bands on a log-scale so that the energy values will be better
# differentiated by color
from matplotlib.colors import LogNorm
imshow(pool['lowlevel.mfcc_bands'].T, aspect = 'auto', interpolation = 'nearest', norm = LogNorm())
show()
# <demo> --- stop ---
# In essentia there is mostly 1 way to output your data in a file: the YamlOutput
# although, as all of this is done in python, it should be pretty easy to output to
# any type of data format.
output = YamlOutput(filename = 'mfcc.sig')
output(pool)
# <demo> --- stop ---
# Say we're not interested in all the MFCC frames, but just their mean & variance.
# To this end, we have the PoolAggregator algorithm, that can do all sorts of
# aggregation: mean, variance, min, max, etc...
aggrPool = PoolAggregator(defaultStats = [ 'mean', 'var' ])(pool)
print 'Original pool descriptor names:'
print pool.descriptorNames()
print
print 'Aggregated pool descriptor names:'
print aggrPool.descriptorNames()
output = YamlOutput(filename = 'mfccaggr.sig')
output(aggrPool)
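# If you later want to read those files back into a Pool, standard mode also
# provides a YamlInput algorithm. A sketch (assuming YamlInput is available in
# your Essentia build):
#
# loaded_pool = YamlInput(filename = 'mfccaggr.sig')()
# print loaded_pool.descriptorNames()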
| agpl-3.0 |
zaxtax/scikit-learn | sklearn/preprocessing/tests/test_label.py | 12 | 17807 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
kernc/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
laurentgo/arrow | dev/archery/archery/lang/python.py | 3 | 7570 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import tokenize
from contextlib import contextmanager
try:
from numpydoc.validate import Docstring, validate
except ImportError:
have_numpydoc = False
else:
have_numpydoc = True
from ..utils.command import Command, capture_stdout, default_bin
class Flake8(Command):
def __init__(self, flake8_bin=None):
self.bin = default_bin(flake8_bin, "flake8")
class Autopep8(Command):
def __init__(self, autopep8_bin=None):
self.bin = default_bin(autopep8_bin, "autopep8")
@capture_stdout()
def run_captured(self, *args, **kwargs):
return self.run(*args, **kwargs)
def _tokenize_signature(s):
lines = s.encode('ascii').splitlines()
generator = iter(lines).__next__
return tokenize.tokenize(generator)
def _convert_typehint(tokens):
names = []
opening_bracket_reached = False
for token in tokens:
# omit the tokens before the opening bracket
if not opening_bracket_reached:
if token.string == '(':
opening_bracket_reached = True
else:
continue
if token.type == 1: # type 1 means NAME token
names.append(token)
else:
if len(names) == 1:
yield (names[0].type, names[0].string)
elif len(names) == 2:
# two "NAME" tokens follow each other which means a cython
# typehint like `bool argument`, so remove the typehint
# note that we could convert it to python typehints, but hints
# are not supported by _signature_fromstr
yield (names[1].type, names[1].string)
elif len(names) > 2:
raise ValueError('More than two NAME tokens follow each other')
names = []
yield (token.type, token.string)
def inspect_signature(obj):
"""
Custom signature inspection primarily for cython generated callables.
    Cython puts the signature on the first line of the docstring, which we
    can reuse to parse the python signature from, but some gymnastics are
    required, like removing the cython typehints.
It converts the cython signature:
array(obj, type=None, mask=None, size=None, from_pandas=None,
bool safe=True, MemoryPool memory_pool=None)
To:
<Signature (obj, type=None, mask=None, size=None, from_pandas=None,
safe=True, memory_pool=None)>
"""
cython_signature = obj.__doc__.splitlines()[0]
cython_tokens = _tokenize_signature(cython_signature)
python_tokens = _convert_typehint(cython_tokens)
python_signature = tokenize.untokenize(python_tokens)
return inspect._signature_fromstr(inspect.Signature, obj, python_signature)
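# Hypothetical usage sketch, added for illustration; _FakeCythonCallable is
# an assumption, not part of archery or pyarrow.  Any object whose __doc__
# starts with a cython-style signature line can be inspected this way.
class _FakeCythonCallable:
    __doc__ = "array(obj, type=None, mask=None, bool safe=True)\n\nDetails."
def _demo_inspect_signature():
    # the cython typehint "bool" is stripped before building the Signature
    return inspect_signature(_FakeCythonCallable())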
class NumpyDoc:
def __init__(self, symbols=None):
if not have_numpydoc:
raise RuntimeError(
'Numpydoc is not available, install the development version '
'with command: pip install '
'git+https://github.com/numpy/numpydoc'
)
self.symbols = set(symbols or {'pyarrow'})
def traverse(self, fn, obj, from_package):
"""Apply a function on publicly exposed API components.
Recursively iterates over the members of the passed object. It omits
        any '_'-prefixed and third-party (non-pyarrow) symbols.
Parameters
----------
obj : Any
from_package : string, default 'pyarrow'
            Only consider objects whose module name starts with this prefix.
"""
todo = [obj]
seen = set()
while todo:
obj = todo.pop()
if obj in seen:
continue
else:
seen.add(obj)
fn(obj)
for name in dir(obj):
if name.startswith('_'):
continue
member = getattr(obj, name)
module = getattr(member, '__module__', None)
if not (module and module.startswith(from_package)):
continue
todo.append(member)
@contextmanager
def _apply_patches(self):
"""
Patch Docstring class to bypass loading already loaded python objects.
"""
orig_load_obj = Docstring._load_obj
orig_signature = inspect.signature
@staticmethod
def _load_obj(obj):
            # By default it expects a qualname and imports the object, but we
# have already loaded object after the API traversal.
if isinstance(obj, str):
return orig_load_obj(obj)
else:
return obj
def signature(obj):
            # inspect.signature tries to parse __text_signature__ if other
            # properties like __signature__ don't exist, but cython doesn't
            # set that property even when the embedsignature cython directive
            # is set. The only way to inspect a cython compiled callable's
            # signature is to parse it from __doc__, provided the
            # embedsignature directive was set during the build phase.
            # So patch inspect.signature to attempt to parse the first
            # line of callable.__doc__ as a signature.
try:
return orig_signature(obj)
except Exception as orig_error:
try:
return inspect_signature(obj)
except Exception:
raise orig_error
try:
Docstring._load_obj = _load_obj
inspect.signature = signature
yield
finally:
Docstring._load_obj = orig_load_obj
inspect.signature = orig_signature
def validate(self, from_package='', allow_rules=None,
disallow_rules=None):
results = []
def callback(obj):
result = validate(obj)
errors = []
for errcode, errmsg in result.get('errors', []):
if allow_rules and errcode not in allow_rules:
continue
if disallow_rules and errcode in disallow_rules:
continue
errors.append((errcode, errmsg))
if len(errors):
result['errors'] = errors
results.append((obj, result))
with self._apply_patches():
for symbol in self.symbols:
try:
obj = Docstring._load_obj(symbol)
except (ImportError, AttributeError):
print('{} is not available for import'.format(symbol))
else:
self.traverse(callback, obj, from_package=from_package)
return results
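# Hedged usage sketch, added for illustration (assumes numpydoc and pyarrow
# are importable): run the docstring linter over the public pyarrow API and
# keep only errors for the rule codes passed via allow_rules.
def _demo_numpydoc_validation(rules=('PR01',)):
    linter = NumpyDoc(symbols={'pyarrow'})
    results = linter.validate(from_package='pyarrow', allow_rules=set(rules))
    return [(obj, res['errors']) for obj, res in results if res.get('errors')]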
| apache-2.0 |
chetan51/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/bezier.py | 70 | 14387 | """
A module providing some utility functions regarding bezier path manipulation.
"""
import numpy as np
from math import sqrt
from matplotlib.path import Path
from operator import xor
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
    # line1 => sin_t1 * x - cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a*d-b*c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_* line1_rhs + b_ * line2_rhs
y = c_* line1_rhs + d_ * line2_rhs
return x, y
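# Hedged usage sketch, added for illustration (not part of the original
# module): intersect a horizontal line through the origin (angle 0) with a
# vertical line through (1, 0) (angle pi/2); the expected result is (1, 0).
def _demo_get_intersection():
    x, y = get_intersection(0., 0., 1., 0.,   # cx1, cy1, cos_t1, sin_t1
                            1., 0., 0., 1.)   # cx2, cy2, cos_t2, sin_t2
    assert abs(x - 1.) < 1e-12 and abs(y) < 1e-12
    return x, y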
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    the locations of the two points located along its perpendicular line at a
    distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy
x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1-t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
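# Illustrative sketch (added; not in the original file): split a quadratic
# bezier segment with control points (0, 0), (1, 2), (2, 0) at t=0.5.  The
# last control point of the left half and the first of the right half both
# equal the curve point at t=0.5, which is (1, 1) here.
def _demo_split_de_casteljau():
    left, right = split_de_casteljau([(0., 0.), (1., 2.), (2., 0.)], 0.5)
    assert np.allclose(left[-1], right[0])
    assert np.allclose(left[-1], (1., 1.))
    return left, right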
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise ValueError("the segment does not seemed to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0]-end[0])**2 + (start[1]-end[1])**2 < tolerence**2:
return t0, t1
# calculate the middle point
middle_t = 0.5*(t0+t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
class BezierSegment:
"""
A simple class of a 2-dimensional bezier segment
"""
    # Higher order bezier lines can be supported by simply adding
    # corresponding values.
_binom_coeff = {1:np.array([1., 1.]),
2:np.array([1., 2., 1.]),
3:np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
        *control_points* : location of control points. It needs to have a
        shape of n * 2, where n is the order of the bezier line. 1 <=
        n <= 3 is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:,0]
yy = _control_points[:,1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1.-t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
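# Minimal usage sketch of BezierSegment, added for illustration only: a
# quadratic segment interpolates its end control points and passes through
# (1, 1) at t=0.5 for this particular choice of control points.
def _demo_bezier_segment():
    bz = BezierSegment([(0., 0.), (1., 2.), (2., 0.)])
    assert np.allclose(bz.point_at_t(0.), (0., 0.))
    assert np.allclose(bz.point_at_t(1.), (2., 0.))
    assert np.allclose(bz.point_at_t(0.5), (1., 1.))
    return bz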
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0+t1)/2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
    Find a radius r (centered at *xy*) between *rmin* and *rmax* at
    which it intersects the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t*r + cx, sin_t*r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax, tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = path_iter.next()
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold=0
i = 1
for ctl_points, command in path_iter:
iold=i
i += len(ctl_points)/2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = zip(bezier_path[::2], bezier_path[1::2])
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r**2
def _f(xy):
x, y = xy
return (x-cx)**2 + (y-cy)**2 < r2
return _f
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1-x0, y1-y0
d = (dx*dx + dy*dy)**.5
return dx/d, dy/d
def get_parallels(bezier2, width):
"""
    Given the quadratic bezier control points *bezier2*, returns
    control points of quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
    # The parallel bezier lines are constructed in the following way.
    # c1 and c2 are control points representing the begin and end of the
    # bezier line. cm is the middle point.
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c2.
    # They are also the angles of the tangent lines of the path at c1 and c2
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
    # t2. Same with cm_right.
cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
    # the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]
return path_left, path_right
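# Hedged example (added, not from the original source): offset control
# polygons for a curved quadratic segment.  The exact coordinates depend on
# the geometry above; here we only check the structure of the result.
def _demo_get_parallels():
    path_left, path_right = get_parallels([(0., 0.), (1., 1.), (2., 0.)], 0.1)
    assert len(path_left) == 3 and len(path_right) == 3
    return path_left, path_right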
def make_wedged_bezier2(bezier2, length, shrink_factor=0.5):
"""
    Similar to get_parallels, returns
    control points of two quadratic bezier lines having a width roughly
    parallel to the given one, separated by *length*.
"""
xx1, yy1 = bezier2[2]
xx2, yy2 = bezier2[1]
xx3, yy3 = bezier2[0]
cx, cy = xx3, yy3
x0, y0 = xx2, yy2
dist = sqrt((x0-cx)**2 + (y0-cy)**2)
cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,
x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)
xx12, yy12 = (xx1+xx2)/2., (yy1+yy2)/2.,
xx23, yy23 = (xx2+xx3)/2., (yy2+yy3)/2.,
dist = sqrt((xx12-xx23)**2 + (yy12-yy23)**2)
cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,
xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)
l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]
l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]
return l_plus, l_minus
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1.
"""
cmx = .5 * (4*mmx - (c1x + c2x))
cmy = .5 * (4*mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
    Similar to get_parallels, returns
    control points of two quadratic bezier lines having a width roughly
    parallel to the given one, separated by *width*.
"""
    # c1, cm, c3
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c3.
    # They are also the angles of the tangent lines of the path at c1 and c3
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and c12-c23
c12x, c12y = (c1x+cmx)*.5, (c1y+cmy)*.5
c23x, c23y = (cmx+c3x)*.5, (cmy+c3y)*.5
c123x, c123y = (c12x+c23x)*.5, (c12y+c23y)*.5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
if 0:
path = Path([(0, 0), (1, 0), (2, 2)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3])
    inside = inside_circle(0., 0., 1.)
    left, right = split_path_inout(path, inside)
clf()
ax = gca()
| gpl-3.0 |
gclenaghan/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/ensemble/tests/test_forest.py | 9 | 43013 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
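    # For reference, the quantity approximated below (mdi_importance) is the
    # theoretical MDI importance from Louppe et al. (2013):
    #   Imp(X_m) = sum_{k=0..p-1} 1 / (C(p, k) * (p - k))
    #              * sum_{B subset of V \ {m}, |B| = k} I(X_m ; Y | B)
    # where p is the number of features and I(.;.|.) is the conditional
    # mutual information, computed here by brute force enumeration.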
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The most compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
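    # Counting sketch (added for clarity): the random threshold on X_0 falls
    # in one of the three gaps (0,1), (1,2), (2,3) with probability 1/3 each.
    # The middle gap immediately forces the compact balanced tree (prob 1/3);
    # either outer gap leaves a 3-valued child whose own threshold picks one
    # of two gaps with probability 1/2, giving 1/3 * 1/2 = 1/6 for each of
    # the four remaining trees.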
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_true(not hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
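# Illustrative sketch (not part of the original test suite): enabling
# ``oob_score`` together with ``bootstrap`` exposes an out-of-bag estimate of
# the generalization score without a separate validation set. Parameter values
# are arbitrary demonstration choices.
def _oob_score_usage_sketch():
    from sklearn.datasets import make_hastie_10_2
    from sklearn.ensemble import RandomForestClassifier

    X_demo, y_demo = make_hastie_10_2(n_samples=300, random_state=0)
    clf = RandomForestClassifier(n_estimators=30, bootstrap=True,
                                 oob_score=True, random_state=0)
    clf.fit(X_demo, y_demo)
    # Fraction of out-of-bag samples classified correctly.
    return clf.oob_score_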
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
    # Assert that the leaf node indices are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
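# Illustrative sketch (not part of the original test suite): how to read the
# output of ``decision_path``. ``indicator`` stacks the node-indicator matrices
# of all trees side by side, and ``n_nodes_ptr[i]:n_nodes_ptr[i + 1]`` gives
# the column range belonging to tree ``i``. Parameter values are arbitrary.
def _decision_path_usage_sketch():
    from sklearn.datasets import make_hastie_10_2
    from sklearn.ensemble import RandomForestClassifier

    X_demo, y_demo = make_hastie_10_2(n_samples=50, random_state=0)
    est = RandomForestClassifier(n_estimators=3, max_depth=2,
                                 random_state=0).fit(X_demo, y_demo)
    indicator, n_nodes_ptr = est.decision_path(X_demo)
    # Columns of the first tree only; each row flags the nodes a sample
    # traverses in that tree.
    first_tree_cols = indicator[:, n_nodes_ptr[0]:n_nodes_ptr[1]]
    return first_tree_cols.shape  # (n_samples, node count of the first tree)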
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_:
            # Simply check that the parameter is passed on correctly. The tree
            # tests cover the actual effect of this parameter.
assert_equal(tree.min_impurity_decrease, 0.1)
| bsd-3-clause |
alphacsc/alphacsc | examples/csc/plot_simulate_randomstate.py | 1 | 3040 | """
==============================
Selecting random state for CSC
==============================
The CSC problem is non-convex. Therefore, the solution depends
on the initialization. Here, we show how to select the
best atoms amongst different initializations.
"""
# Authors: Mainak Jas <[email protected]>
# Tom Dupre La Tour <[email protected]>
# Umut Simsekli <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
###############################################################################
# As before, let us first define the parameters of our model.
n_times_atom = 64 # L
n_times = 512 # T
n_atoms = 2 # K
n_trials = 100 # N
n_iter = 50
reg = 0.1
###############################################################################
# Here, we simulate the data
from alphacsc.simulate import simulate_data # noqa
from scipy.stats import levy_stable # noqa
from alphacsc import check_random_state # noqa
random_state_simulate = 1
X, ds_true, z_true = simulate_data(n_trials, n_times, n_times_atom,
n_atoms, random_state_simulate)
# Add stationary noise:
fraction_corrupted = 0.02
n_corrupted_trials = int(fraction_corrupted * n_trials)
rng = check_random_state(random_state_simulate)
X += 0.01 * rng.randn(*X.shape)
idx_corrupted = rng.randint(0, n_trials,
size=n_corrupted_trials)
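# Note: ``idx_corrupted`` and the ``levy_stable`` import are set up here but
# not used below. A hypothetical extension (not part of this example) could
# add heavy-tailed, impulsive noise to the selected trials, e.g.:
#
#     X[idx_corrupted] += levy_stable.rvs(1.2, 0, loc=0, scale=0.05,
#                                         size=(n_corrupted_trials, n_times),
#                                         random_state=random_state_simulate)
#
# The alpha, loc and scale values above are arbitrary illustrative choices.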
###############################################################################
# Now, we run vanilla CSC on the data but with different initializations.
from alphacsc import learn_d_z # noqa
pobjs, d_hats = list(), list()
for random_state in range(5):
print('\nRandom state: %d' % random_state)
pobj, times, d_hat, z_hat, reg = learn_d_z(
X, n_atoms, n_times_atom, reg=reg, n_iter=n_iter,
solver_d_kwargs=dict(factr=100), random_state=random_state,
n_jobs=1, verbose=1)
pobjs.append(pobj[-1])
d_hats.append(d_hat)
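# (Illustrative addition, not in the original example.) Print the final
# objective value reached from each random initialization; lower is better.
for seed, final_pobj in enumerate(pobjs):
    print('random_state %d: final pobj %0.3f' % (seed, final_pobj))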
###############################################################################
# As we loop through the random states, we save the objective value `pobj`
# at the last iteration of the algorithm.
#
# Now, let us look at the atoms for different initializations.
import matplotlib.pyplot as plt # noqa
fig, axes = plt.subplots(1, 5, figsize=(17, 3), sharex=True, sharey=True)
for ax, this_pobjs, d_hat in zip(axes, pobjs, d_hats):
ax.plot(d_hat.T)
ax.plot(ds_true.T, 'k--')
ax.set_title('pobj: %0.2f' % this_pobjs)
###############################################################################
# Note that the lower the objective value, the better the recovered atom.
# This is one reason why using a concrete mathematical objective function, as
# in convolutional sparse coding, is superior to heuristic methods.
# Now, we select the best atom amongst them.
import numpy as np # noqa
plt.figure()
plt.plot(d_hats[np.argmin(pobjs)].T)
plt.plot(ds_true.T, 'k--')
plt.show()
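# (Illustrative addition, not in the original example.) Report which
# initialization won, i.e. the one whose atoms are selected above.
best_state = int(np.argmin(pobjs))
print('Best random_state: %d (final pobj %0.3f)' % (best_state,
                                                    pobjs[best_state]))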
| bsd-3-clause |